Mirror of https://github.com/containers/skopeo.git, synced 2026-01-30 05:49:52 +00:00

Compare commits: 210 commits, v1.9.0 ... release-1.
Commit SHA1s (210):

38d9c8eb1a 71a153aba4 9beaf17536 a32fb6f5f8 85ce427969 c8ef2dcce3
b5b0a9cd81 e0171abca9 4773bf1895 7602ac68f8 7f996f3bdb 78dc389125
34ed1100de 051898442c 89cd9b89b6 df2b9aedc8 6f884cd817 7e11ab4ada
9b087c653c d79588e6c1 dc1e14f7a7 8191ef3ea1 902506dd73 3f98753bfd
b2884205e7 fb1ade6d9e 0d212fc3b5 40dd6507df cc958d3e5d 9d036f3053
7b886d11bb 17df36a3e6 83bcd13659 b3b2c73764 afbdaf8ecb fe15a36ed9
c91142485e 61c519dcf2 0fad119375 48b9d94c87 47919520f5 e0a5df297d
80e3fd1095 9f04dfdec9 36c480f643 850bc49d27 a98c137243 198155027d
641efe9930 67a8bef6ea 9d65de7d61 63da8390f1 b51eb214c2 1fac61ef57
292962d34c e239f32ae0 ee8048583b 1db6846c01 0698e82b30 8e09e641bf
bb1ac89327 03b5bdec24 28995cd5d4 1133a2a395 28175104d7 d0cf39d860
71fa1f441f f17eafe85b 4517ea0b7b 58bccf3882 e71305f7bb f0c08985b3
ae44ecd570 92e3146aa0 f5aaabd5cc 960713da32 60ecf7a031 b51f8ea200
e024c43892 9c6cbc94c7 fb4c49739f 3eb9d71d7f 6e6104ff8b 46d48295fb
c093484820 3212bbed6f b72a5c98a9 f6d587d816 40ba7a27af 278be5a5d0
dc3f2b6cec b5ac534960 661c9698ee 35532b2404 1af1d9c261 bdf1930221
b665ac4c09 e62fcca5ed 563c91a2fd cf29c73079 e1fdb4da03 d06bf27eb8
7e6264136c 8410bfdd91 6136a2b9c3 16d4a81b79 794d6b4650 2b55a7231a
62e698b567 f968b2a890 2739a29aea fe5c4091ee 5a8d72635c 88f6ff09f9
d5327bced1 6d3d9a3bb2 723351cec1 5c69302d75 bdbb46be5a 6d564d4de8
01201df865 4c0e565038 03da797e42 757ec5dbf6 08c290170d 08b27fc50e
7738dbb335 9b6f5b6e75 632cebd74e ea9aa68b0f c476d62671 fce2cf9c72
9724da1ff2 955a59c864 ae50898b8a f3aee25c7c 1983173b60 3411ebd462
4ccfb033fb 2133fa36da a495155030 032fd15c10 e021b675e2 7ee3396575
5eace4078f ee60474d5a ff2a361a0a 7ebff0f533 787e10873c a2f29acc7d
ee84302b60 a169ccf8f3 89ae387d7b 66fe7af769 feabfac2a7 cf354b7abd
18a95f947e 07da29fd37 9b40f0be2f 869d496f18 166b587a77 0a42c33af9
90c5033886 d3ff6e2635 06cf25fb53 3a05dca94e 7bbaffc4f4 2b948c177a
d9dfc44888 ba23a9162f 1eada90813 4b9ffac0cc a81460437a f36752a279
4e2dee4362 d58e59a57d 3450c11a0d b2e7139331 3a808c2ed5 04169cac6e
a99bd0c9e3 97c3eabacf fa2b15ff76 9e79da5e33 97cb423b52 32f24e8870
a863a0dccb 67a4e04471 14b05e8064 e95123a2d4 ca1b0f34d1 28a5365945
73a668e99d 61c28f5d47 eafd7e5518 2cfbeb2db8 b9cf626ea3 263d3264ba
63dabfcf8b 2eac0f463a c10b63dc71 b7e7374e71 08846d18cc 049163fcec
3039cd5a77 b42e664854 ad12a292a3 ee477d8877 dbe47d765a f1485781be
.cirrus.yml (68)

@@ -23,20 +23,14 @@ env:
####
#### Cache-image names to test with (double-quotes around names are critical)
####
FEDORA_NAME: "fedora-36"
PRIOR_FEDORA_NAME: "fedora-35"
UBUNTU_NAME: "ubuntu-2204"
FEDORA_NAME: "fedora-37"

# Google-cloud VM Images
IMAGE_SUFFIX: "c6340043416535040"
IMAGE_SUFFIX: "c6300530360713216"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"

# Container FQIN's
FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
PRIOR_FEDORA_CONTAINER_FQIN: "quay.io/libpod/prior-fedora_podman:${IMAGE_SUFFIX}"
UBUNTU_CONTAINER_FQIN: "quay.io/libpod/ubuntu_podman:${IMAGE_SUFFIX}"

# Built along with the standard PR-based workflow in c/automation_images
SKOPEO_CIDEV_CONTAINER_FQIN: "quay.io/libpod/skopeo_cidev:${IMAGE_SUFFIX}"

@@ -81,14 +75,18 @@ doccheck_task:
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" doccheck

osx_task:
# Run for regular PRs and those with [CI:BUILD] but not [CI:DOCS]
only_if: &not_docs_multiarch >-
# Don't run for docs-only or multi-arch image builds.
# Also don't run on release-branches or their PRs,
# since base container-image is not version-constrained.
only_if: &not_docs_or_release_branch >-
($CIRRUS_BASE_BRANCH == $CIRRUS_DEFAULT_BRANCH ||
$CIRRUS_BRANCH == $CIRRUS_DEFAULT_BRANCH ) &&
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_CRON != 'multiarch'
depends_on:
- validate
macos_instance:
image: catalina-xcode
image: ghcr.io/cirruslabs/macos-ventura-base:latest
setup_script: |
export PATH=$GOPATH/bin:$PATH
brew update

@@ -105,7 +103,9 @@ osx_task:

cross_task:
alias: cross
only_if: *not_docs_multiarch
only_if: >-
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_CRON != 'multiarch'
depends_on:
- validate
gce_instance: &standardvm

@@ -125,6 +125,42 @@ cross_task:
"${GOSRC}/${SCRIPT_BASE}/runner.sh" cross


ostree-rs-ext_task:
alias: proxy_ostree_ext
only_if: *not_docs_or_release_branch
# WARNING: This task potentially performs a container image
# build (on change) with runtime package installs. Therefore,
# its behavior can be unpredictable and potentially flake-prone.
# In case of emergency, uncomment the next statement to bypass.
#
# skip: $CI == "true"
#
depends_on:
- validate
# Ref: https://cirrus-ci.org/guide/docker-builder-vm/#dockerfile-as-a-ci-environment
container:
# The runtime image will be rebuilt on change
dockerfile: contrib/cirrus/ostree_ext.dockerfile
docker_arguments: # required build-args
BASE_FQIN: quay.io/coreos-assembler/fcos-buildroot:testing-devel
CIRRUS_IMAGE_VERSION: 1
env:
EXT_REPO_NAME: ostree-rs-ext
EXT_REPO_HOME: $CIRRUS_WORKING_DIR/../$EXT_REPO_NAME
EXT_REPO: https://github.com/ostreedev/${EXT_REPO_NAME}.git
skopeo_build_script:
- dnf builddep -y skopeo
- make
- make install
proxy_ostree_ext_build_script:
- git clone --depth 1 $EXT_REPO $EXT_REPO_HOME
- cd $EXT_REPO_HOME
- cargo test --no-run
proxy_ostree_ext_test_script:
- cd $EXT_REPO_HOME
- cargo test -- --nocapture --quiet


#####
##### NOTE: This task is subtantially duplicated in the containers/image
##### repository's `.cirrus.yml`. Changes made here should be fully merged

@@ -221,13 +257,12 @@ meta_task:
container: &smallcontainer
cpu: 2
memory: 2
image: quay.io/libpod/imgts:$IMAGE_SUFFIX
image: quay.io/libpod/imgts:latest
env:
# Space-separated list of images used by this repository state
IMGNAMES: >-
IMGNAMES: |
${FEDORA_CACHE_IMAGE_NAME}
${PRIOR_FEDORA_CACHE_IMAGE_NAME}
${UBUNTU_CACHE_IMAGE_NAME}
build-push-${IMAGE_SUFFIX}
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_REPO_NAME}"
GCPJSON: ENCRYPTED[6867b5a83e960e7c159a98fe6c8360064567a071c6f4b5e7d532283ecd870aa65c94ccd74bdaa9bf7aadac9d42e20a67]

@@ -249,6 +284,7 @@ success_task:
- doccheck
- osx
- cross
- proxy_ostree_ext
- test_skopeo
- image_build
- meta
.github/renovate.json5 (74, vendored, new file)

@@ -0,0 +1,74 @@
/*
Renovate is a service similar to GitHub Dependabot, but with
(fantastically) more configuration options. So many options
in fact, if you're new I recommend glossing over this cheat-sheet
prior to the official documentation:

https://www.augmentedmind.de/2021/07/25/renovate-bot-cheat-sheet

Configuration Update/Change Procedure:
1. Make changes
2. Manually validate changes (from repo-root):

podman run -it \
-v ./.github/renovate.json5:/usr/src/app/renovate.json5:z \
docker.io/renovate/renovate:latest \
renovate-config-validator
3. Commit.

Configuration Reference:
https://docs.renovatebot.com/configuration-options/

Monitoring Dashboard:
https://app.renovatebot.com/dashboard#github/containers

Note: The Renovate bot will create/manage it's business on
branches named 'renovate/*'. Otherwise, and by
default, the only the copy of this file that matters
is the one on the `main` branch. No other branches
will be monitored or touched in any way.
*/

{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",

/*************************************************
****** Global/general configuration options *****
*************************************************/

// Re-use predefined sets of configuration options to DRY
"extends": [
// https://github.com/containers/automation/blob/main/renovate/defaults.json5
"github>containers/automation//renovate/defaults.json5"
],

// Permit automatic rebasing when base-branch changes by more than
// one commit.
"rebaseWhen": "behind-base-branch",

/*************************************************
*** Repository-specific configuration options ***
*************************************************/

// Don't leave dep. update. PRs "hanging", assign them to people.
"assignees": ["containers/image-maintainers"], // same for skopeo

/*************************************************
***** Golang-specific configuration options *****
*************************************************/

"golang": {
// N/B: LAST MATCHING RULE WINS
// https://docs.renovatebot.com/configuration-options/#packagerules
"packageRules": [
// Package version retraction (https://go.dev/ref/mod#go-mod-file-retract)
// is broken in Renovate
// ref: https://github.com/renovatebot/renovate/issues/13012
{
"matchPackageNames": ["github.com/containers/common"],
// Both v1.0.0 and v1.0.1 should be ignored.
"allowedVersions": "!/v((1.0.0)|(1.0.1))$/"
},
],
},
}
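The negated regex in the `"allowedVersions"` rule above is what actually skips the retracted releases: any candidate version of `github.com/containers/common` matching `v1.0.0` or `v1.0.1` is rejected, everything else passes. A minimal Go sketch of that matching logic, for illustration only (the `allowed` helper and the escaped-dot pattern are assumptions, not part of the Renovate config):

```go
package main

import (
	"fmt"
	"regexp"
)

// retractedCommonVersions mirrors the intent of the "allowedVersions" rule above:
// versions of github.com/containers/common matching this pattern are skipped.
// (Dots are escaped here; the Renovate rule leaves them unescaped.)
var retractedCommonVersions = regexp.MustCompile(`v((1\.0\.0)|(1\.0\.1))$`)

// allowed reports whether a candidate version would pass the filter.
func allowed(version string) bool {
	return !retractedCommonVersions.MatchString(version)
}

func main() {
	for _, v := range []string{"v1.0.0", "v1.0.1", "v0.50.1"} {
		fmt.Printf("%s allowed: %v\n", v, allowed(v))
	}
}
```

Running it reports `v1.0.0` and `v1.0.1` as disallowed and `v0.50.1` as allowed, matching the comment in the rule.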
.github/workflows/check_cirrus_cron.yml (109, vendored)

@@ -3,103 +3,18 @@
# See also:
# https://github.com/containers/podman/blob/main/.github/workflows/check_cirrus_cron.yml

# Format Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions

# Required to un-FUBAR default ${{github.workflow}} value
name: check_cirrus_cron

on:
# Note: This only applies to the default branch.
schedule:
# N/B: This should correspond to a period slightly after
# the last job finishes running. See job defs. at:
# https://cirrus-ci.com/settings/repository/6706677464432640
- cron: '59 23 * * 1-5'
# Debug: Allow triggering job manually in github-actions WebUI
workflow_dispatch: {}

permissions:
contents: read

env:
# Debug-mode can reveal secrets, only enable by a secret value.
# Ref: https://help.github.com/en/actions/configuring-and-managing-workflows/managing-a-workflow-run#enabling-step-debug-logging
ACTIONS_STEP_DEBUG: '${{ secrets.ACTIONS_STEP_DEBUG }}'
# CSV listing of e-mail addresses for delivery failure or error notices
RCPTCSV: rh.container.bot@gmail.com,podman-monitor@lists.podman.io
# Filename for table of cron-name to build-id data
# (must be in $GITHUB_WORKSPACE/artifacts/)
NAME_ID_FILEPATH: './artifacts/name_id.txt'
# Note: This only applies to the default branch.
schedule:
# N/B: This should correspond to a period slightly after
# the last job finishes running. See job defs. at:
# https://cirrus-ci.com/settings/repository/6706677464432640
- cron: '03 03 * * 1-5'
# Debug: Allow triggering job manually in github-actions WebUI
workflow_dispatch: {}

jobs:
cron_failures:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
with:
persist-credentials: false

# Avoid duplicating cron_failures.sh in skopeo repo.
- uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
with:
repository: containers/podman
path: '_podman'
persist-credentials: false

- name: Get failed cron names and Build IDs
id: cron
run: './_podman/.github/actions/${{ github.workflow }}/${{ github.job }}.sh'

- if: steps.cron.outputs.failures > 0
shell: bash
# Must be inline, since context expressions are used.
# Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions
run: |
set -eo pipefail
(
echo "Detected one or more Cirrus-CI cron-triggered jobs have failed recently:"
echo ""

while read -r NAME BID; do
echo "Cron build '$NAME' Failed: https://cirrus-ci.com/build/$BID"
done < "$NAME_ID_FILEPATH"

echo ""
echo "# Source: ${{ github.workflow }} workflow on ${{ github.repository }}."
# Separate content from sendgrid.com automatic footer.
echo ""
echo ""
) > ./artifacts/email_body.txt

- if: steps.cron.outputs.failures > 0
name: Send failure notification e-mail
# Ref: https://github.com/dawidd6/action-send-mail
uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
with:
server_address: ${{secrets.ACTION_MAIL_SERVER}}
server_port: 465
username: ${{secrets.ACTION_MAIL_USERNAME}}
password: ${{secrets.ACTION_MAIL_PASSWORD}}
subject: Cirrus-CI cron build failures on ${{github.repository}}
to: ${{env.RCPTCSV}}
from: ${{secrets.ACTION_MAIL_SENDER}}
body: file://./artifacts/email_body.txt

- if: always()
uses: actions/upload-artifact@82c141cc518b40d92cc801eee768e7aafc9c2fa2 # v2
with:
name: ${{ github.job }}_artifacts
path: artifacts/*

- if: failure()
name: Send error notification e-mail
uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
with:
server_address: ${{secrets.ACTION_MAIL_SERVER}}
server_port: 465
username: ${{secrets.ACTION_MAIL_USERNAME}}
password: ${{secrets.ACTION_MAIL_PASSWORD}}
subject: Github workflow error on ${{github.repository}}
to: ${{env.RCPTCSV}}
from: ${{secrets.ACTION_MAIL_SENDER}}
body: "Job failed: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}"
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
call_cron_failures:
uses: containers/podman/.github/workflows/check_cirrus_cron.yml@main
secrets: inherit
.github/workflows/rerun_cirrus_cron.yml (19, vendored, new file)

@@ -0,0 +1,19 @@
---

# See also: https://github.com/containers/podman/blob/main/.github/workflows/rerun_cirrus_cron.yml

on:
# Note: This only applies to the default branch.
schedule:
# N/B: This should correspond to a period slightly after
# the last job finishes running. See job defs. at:
# https://cirrus-ci.com/settings/repository/6706677464432640
- cron: '01 01 * * 1-5'
# Debug: Allow triggering job manually in github-actions WebUI
workflow_dispatch: {}

jobs:
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
call_cron_rerun:
uses: containers/podman/.github/workflows/rerun_cirrus_cron.yml@main
secrets: inherit
.github/workflows/stale.yml (2, vendored)

@@ -17,7 +17,7 @@ jobs:
pull-requests: write # for actions/stale to close stale PRs
runs-on: ubuntu-latest
steps:
- uses: actions/stale@98ed4cb500039dbcccf4bd9bedada4d0187f2757 # v3
- uses: actions/stale@v7
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'A friendly reminder that this issue had no activity for 30 days.'
Makefile (30)

@@ -28,10 +28,15 @@ ifeq ($(GOBIN),)
GOBIN := $(GOPATH)/bin
endif

# Multiple scripts are sensitive to this value, make sure it's exported/available
# N/B: Need to use 'command -v' here for compatibility with MacOS.
export CONTAINER_RUNTIME ?= $(if $(shell command -v podman),podman,docker)
GOMD2MAN ?= $(if $(shell command -v go-md2man),go-md2man,$(GOBIN)/go-md2man)
# Scripts may also use CONTAINER_RUNTIME, so we need to export it.
# Note possibly non-obvious aspects of this:
# - We need to use 'command -v' here, not 'which', for compatibility with MacOS.
# - GNU Make 4.2.1 (included in Ubuntu 20.04) incorrectly tries to avoid invoking
# a shell, and fails because there is no /usr/bin/command. The trailing ';' in
# $(shell … ;) defeats that heuristic (recommended in
# https://savannah.gnu.org/bugs/index.php?57625 ).
export CONTAINER_RUNTIME ?= $(if $(shell command -v podman ;),podman,docker)
GOMD2MAN ?= $(if $(shell command -v go-md2man ;),go-md2man,$(GOBIN)/go-md2man)

# Go module support: set `-mod=vendor` to use the vendored sources.
# See also hack/make.sh.

@@ -50,8 +55,6 @@ ifeq ($(GOOS), linux)
endif
endif

GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)

# If $TESTFLAGS is set, it is passed as extra arguments to 'go test'.
# You can increase test output verbosity with the option '-test.vv'.
# You can select certain tests to run, with `-test.run <regex>` for example:

@@ -87,7 +90,7 @@ endif
CONTAINER_GOSRC = /src/github.com/containers/skopeo
CONTAINER_RUN ?= $(CONTAINER_CMD) --security-opt label=disable -v $(CURDIR):$(CONTAINER_GOSRC) -w $(CONTAINER_GOSRC) $(SKOPEO_CIDEV_CONTAINER_FQIN)

GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
GIT_COMMIT := $(shell GIT_CEILING_DIRECTORIES=$$(cd ..; pwd) git rev-parse HEAD 2> /dev/null || true)

EXTRA_LDFLAGS ?=
SKOPEO_LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'

@@ -194,7 +197,11 @@ shell:
check: validate test-unit test-integration test-system

test-integration:
$(CONTAINER_RUN) $(MAKE) test-integration-local
# This is intended to be equal to $(CONTAINER_RUN), but with --cap-add=cap_mknod.
# --cap-add=cap_mknod is important to allow skopeo to use containers-storage: directly as it exists in the callers’ environment, without
# creating a nested user namespace (which requires /etc/subuid and /etc/subgid to be set up)
$(CONTAINER_CMD) --security-opt label=disable --cap-add=cap_mknod -v $(CURDIR):$(CONTAINER_GOSRC) -w $(CONTAINER_GOSRC) $(SKOPEO_CIDEV_CONTAINER_FQIN) \
$(MAKE) test-integration-local


# Intended for CI, assumed to be running in quay.io/libpod/skopeo_cidev container.

@@ -202,7 +209,6 @@ test-integration-local: bin/skopeo
hack/make.sh test-integration

# complicated set of options needed to run podman-in-podman
# TODO: The $(RM) command will likely fail w/o `podman unshare`
test-system:
DTEMP=$(shell mktemp -d --tmpdir=/var/tmp podman-tmp.XXXXXX); \
$(CONTAINER_CMD) --privileged \

@@ -211,7 +217,7 @@ test-system:
"$(SKOPEO_CIDEV_CONTAINER_FQIN)" \
$(MAKE) test-system-local; \
rc=$$?; \
-$(RM) -rf $$DTEMP; \
$(CONTAINER_RUNTIME) unshare rm -rf $$DTEMP; # This probably doesn't work with Docker, oh well, better than nothing... \
exit $$rc

# Intended for CI, assumed to already be running in quay.io/libpod/skopeo_cidev container.

@@ -242,12 +248,12 @@ test-unit-local: bin/skopeo
$(GO) test $(MOD_VENDOR) -tags "$(BUILDTAGS)" $$($(GO) list $(MOD_VENDOR) -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')

vendor:
$(GO) mod tidy
$(GO) mod tidy -compat=1.17
$(GO) mod vendor
$(GO) mod verify

vendor-in-container:
podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src quay.io/libpod/golang:1.16 $(MAKE) vendor
podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src golang $(MAKE) vendor

# CAUTION: This is not a replacement for RPMs provided by your distro.
# Only intended to build and test the latest unreleased changes.
README.md (36)

@@ -1,7 +1,8 @@
skopeo [![Build Status](https://travis-ci.org/containers/skopeo.svg?branch=master)](https://travis-ci.org/containers/skopeo)
<!--- skopeo [![Build Status](https://travis-ci.org/containers/skopeo.svg?branch=master)](https://travis-ci.org/containers/skopeo)
=
--->

<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">
<img src="https://cdn.rawgit.com/containers/skopeo/main/docs/skopeo.svg" width="250">

----

@@ -56,29 +57,37 @@ Examples:
$ skopeo inspect docker://registry.fedoraproject.org/fedora:latest
{
"Name": "registry.fedoraproject.org/fedora",
"Digest": "sha256:655721ff613ee766a4126cb5e0d5ae81598e1b0c3bcf7017c36c4d72cb092fe9",
"Digest": "sha256:0f65bee641e821f8118acafb44c2f8fe30c2fc6b9a2b3729c0660376391aa117",
"RepoTags": [
"24",
"25",
"26-modular",
...
"34-aarch64",
"34",
"latest",
...
],
"Created": "2020-04-29T06:48:16Z",
"Created": "2022-11-24T13:54:18Z",
"DockerVersion": "1.10.1",
"Labels": {
"license": "MIT",
"name": "fedora",
"vendor": "Fedora Project",
"version": "32"
"version": "37"
},
"Architecture": "amd64",
"Os": "linux",
"Layers": [
"sha256:3088721d7dbf674fc0be64cd3cf00c25aab921cacf35fa0e7b1578500a3e1653"
"sha256:2a0fc6bf62e155737f0ace6142ee686f3c471c1aab4241dc3128904db46288f0"
],
"LayersData": [
{
"MIMEType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"Digest": "sha256:2a0fc6bf62e155737f0ace6142ee686f3c471c1aab4241dc3128904db46288f0",
"Size": 71355009,
"Annotations": null
}
],
"Env": [
"DISTTAG=f32container",
"FGC=f32",
"DISTTAG=f37container",
"FGC=f37",
"container=oci"
]
}

@@ -200,6 +209,7 @@ Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate
| -------------------------------------------------- | ---------------------------------------------------------------------------------------------|
| [skopeo-copy(1)](/docs/skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](/docs/skopeo-delete.1.md) | Mark the image-name for later deletion by the registry's garbage collector. |
| [skopeo-generate-sigstore-key(1)](/docs/skopeo-generate-sigstore-key.1.md) | Generate a sigstore public/private key pair. |
| [skopeo-inspect(1)](/docs/skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
| [skopeo-list-tags(1)](/docs/skopeo-list-tags.1.md) | Return a list of tags for the transport-specific image repository. |
| [skopeo-login(1)](/docs/skopeo-login.1.md) | Login to a container registry. |

@@ -207,7 +217,7 @@ Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate
| [skopeo-manifest-digest(1)](/docs/skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](/docs/skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](/docs/skopeo-standalone-verify.1.md)| Verify an image signature. |
| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between container registries and local directories. |
| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between registry repositories and local directories. |

License
-
@@ -13,6 +13,8 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/cli"
"github.com/containers/image/v5/pkg/cli/sigstore"
"github.com/containers/image/v5/signature/signer"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
encconfig "github.com/containers/ocicrypt/config"

@@ -25,10 +27,11 @@ type copyOptions struct {
deprecatedTLSVerify *deprecatedTLSVerifyOption
srcImage *imageOptions
destImage *imageDestOptions
retryOpts *retry.RetryOptions
retryOpts *retry.Options
additionalTags []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
signBySigstoreParamFile string // Sign the image using a sigstore signature per configuration in a param file
signBySigstorePrivateKey string // Sign the image using a sigstore private key
signPassphraseFile string // Path pointing to a passphrase file when signing (for either signature format, but only one of them)
signIdentity string // Identity of the signed image, must be a fully specified docker reference

@@ -83,6 +86,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
flags.BoolVar(&opts.preserveDigests, "preserve-digests", false, "Preserve digests of images and lists")
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE-IMAGE")
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
flags.StringVar(&opts.signBySigstoreParamFile, "sign-by-sigstore", "", "Sign the image using a sigstore parameter file at `PATH`")
flags.StringVar(&opts.signBySigstorePrivateKey, "sign-by-sigstore-private-key", "", "Sign the image using a sigstore private key at `PATH`")
flags.StringVar(&opts.signPassphraseFile, "sign-passphrase-file", "", "Read a passphrase for signing an image from `PATH`")
flags.StringVar(&opts.signIdentity, "sign-identity", "", "Identity of signed image, must be a fully specified docker reference. Defaults to the target docker reference.")

@@ -252,6 +256,22 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
passphrase = p
} // opts.signByFingerprint triggers a GPG-agent passphrase prompt, possibly using a more secure channel, so we usually shouldn’t prompt ourselves if no passphrase was explicitly provided.

var signers []*signer.Signer
if opts.signBySigstoreParamFile != "" {
signer, err := sigstore.NewSignerFromParameterFile(opts.signBySigstoreParamFile, &sigstore.Options{
PrivateKeyPassphrasePrompt: func(keyFile string) (string, error) {
return promptForPassphrase(keyFile, os.Stdin, os.Stdout)
},
Stdin: os.Stdin,
Stdout: stdout,
})
if err != nil {
return fmt.Errorf("Error using --sign-by-sigstore: %w", err)
}
defer signer.Close()
signers = append(signers, signer)
}

var signIdentity reference.Named = nil
if opts.signIdentity != "" {
signIdentity, err = reference.ParseNamed(opts.signIdentity)

@@ -260,9 +280,12 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
}
}

return retry.RetryIfNecessary(ctx, func() error {
opts.destImage.warnAboutIneffectiveOptions(destRef.Transport())

return retry.IfNecessary(ctx, func() error {
manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
RemoveSignatures: opts.removeSignatures,
Signers: signers,
SignBy: opts.signByFingerprint,
SignPassphrase: passphrase,
SignBySigstorePrivateKeyFile: opts.signBySigstorePrivateKey,

@@ -15,7 +15,7 @@ import (
type deleteOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
retryOpts *retry.Options
}

func deleteCmd(global *globalOptions) *cobra.Command {

@@ -70,7 +70,7 @@ func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()

return retry.RetryIfNecessary(ctx, func() error {
return retry.IfNecessary(ctx, func() error {
return ref.DeleteImage(ctx, sys)
}, opts.retryOpts)
}

cmd/skopeo/generate_sigstore_key.go (90, new file)

@@ -0,0 +1,90 @@
package main

import (
"errors"
"fmt"
"io"
"io/fs"
"os"

"github.com/containers/image/v5/pkg/cli"
"github.com/containers/image/v5/signature/sigstore"
"github.com/spf13/cobra"
)

type generateSigstoreKeyOptions struct {
outputPrefix string
passphraseFile string
}

func generateSigstoreKeyCmd() *cobra.Command {
var opts generateSigstoreKeyOptions
cmd := &cobra.Command{
Use: "generate-sigstore-key [command options] --output-prefix PREFIX",
Short: "Generate a sigstore public/private key pair",
RunE: commandAction(opts.run),
Example: "skopeo generate-sigstore-key --output-prefix my-key",
}
adjustUsage(cmd)
flags := cmd.Flags()
flags.StringVar(&opts.outputPrefix, "output-prefix", "", "Write the keys to `PREFIX`.pub and `PREFIX`.private")
flags.StringVar(&opts.passphraseFile, "passphrase-file", "", "Read a passphrase for the private key from `PATH`")
return cmd
}

// ensurePathDoesNotExist verifies that path does not refer to an existing file,
// and returns an error if so.
func ensurePathDoesNotExist(path string) error {
switch _, err := os.Stat(path); {
case err == nil:
return fmt.Errorf("Refusing to overwrite existing %q", path)
case errors.Is(err, fs.ErrNotExist):
return nil
default:
return fmt.Errorf("Error checking existence of %q: %w", path, err)
}
}

func (opts *generateSigstoreKeyOptions) run(args []string, stdout io.Writer) error {
if len(args) != 0 || opts.outputPrefix == "" {
return errors.New("Usage: generate-sigstore-key --output-prefix PREFIX")
}

pubKeyPath := opts.outputPrefix + ".pub"
privateKeyPath := opts.outputPrefix + ".private"
if err := ensurePathDoesNotExist(pubKeyPath); err != nil {
return err
}
if err := ensurePathDoesNotExist(privateKeyPath); err != nil {
return err
}

var passphrase string
if opts.passphraseFile != "" {
p, err := cli.ReadPassphraseFile(opts.passphraseFile)
if err != nil {
return err
}
passphrase = p
} else {
p, err := promptForPassphrase(privateKeyPath, os.Stdin, os.Stdout)
if err != nil {
return err
}
passphrase = p
}

keys, err := sigstore.GenerateKeyPair([]byte(passphrase))
if err != nil {
return fmt.Errorf("Error generating key pair: %w", err)
}

if err := os.WriteFile(privateKeyPath, keys.PrivateKey, 0600); err != nil {
return fmt.Errorf("Error writing private key to %q: %w", privateKeyPath, err)
}
if err := os.WriteFile(pubKeyPath, keys.PublicKey, 0644); err != nil {
return fmt.Errorf("Error writing private key to %q: %w", pubKeyPath, err)
}
fmt.Fprintf(stdout, "Key written to %q and %q", privateKeyPath, pubKeyPath)
return nil
}

cmd/skopeo/generate_sigstore_key_test.go (79, new file)

@@ -0,0 +1,79 @@
package main

import (
"os"
"path/filepath"
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestGenerateSigstoreKey(t *testing.T) {
// Invalid command-line arguments
for _, args := range [][]string{
{},
{"--output-prefix", "foo", "a1"},
} {
out, err := runSkopeo(append([]string{"generate-sigstore-key"}, args...)...)
assertTestFailed(t, out, err, "Usage")
}

// One of the destination files already exists
outputSuffixes := []string{".pub", ".private"}
for _, suffix := range outputSuffixes {
dir := t.TempDir()
prefix := filepath.Join(dir, "prefix")
err := os.WriteFile(prefix+suffix, []byte{}, 0600)
require.NoError(t, err)
out, err := runSkopeo("generate-sigstore-key",
"--output-prefix", prefix, "--passphrase-file", "/dev/null",
)
assertTestFailed(t, out, err, "Refusing to overwrite")
}

// One of the destinations is inaccessible (simulate by a symlink that tries to
// traverse a non-directory)
for _, suffix := range outputSuffixes {
dir := t.TempDir()
nonDirectory := filepath.Join(dir, "nondirectory")
err := os.WriteFile(nonDirectory, []byte{}, 0600)
require.NoError(t, err)
prefix := filepath.Join(dir, "prefix")
err = os.Symlink(filepath.Join(nonDirectory, "unaccessible"), prefix+suffix)
require.NoError(t, err)
out, err := runSkopeo("generate-sigstore-key",
"--output-prefix", prefix, "--passphrase-file", "/dev/null",
)
assertTestFailed(t, out, err, prefix+suffix) // + an OS-specific error message
}
destDir := t.TempDir()
// Error reading passphrase
out, err := runSkopeo("generate-sigstore-key",
"--output-prefix", filepath.Join(destDir, "prefix"),
"--passphrase-file", filepath.Join(destDir, "this-does-not-exist"),
)
assertTestFailed(t, out, err, "this-does-not-exist")

// (The interactive passphrase prompting is not yet tested)

// Error writing outputs is untested: when unit tests run as root, we can’t use permissions on a directory to cause write failures,
// with the --output-prefix mechanism, and refusing to even start writing to pre-exisiting files, directories are the only mechanism
// we have to trigger a write failure.

// Success
// Just a smoke-test, useability of the keys is tested in the generate implementation.
dir := t.TempDir()
prefix := filepath.Join(dir, "prefix")
passphraseFile := filepath.Join(dir, "passphrase")
err = os.WriteFile(passphraseFile, []byte("some passphrase"), 0600)
require.NoError(t, err)
out, err = runSkopeo("generate-sigstore-key",
"--output-prefix", prefix, "--passphrase-file", passphraseFile,
)
assert.NoError(t, err)
for _, suffix := range outputSuffixes {
assert.Contains(t, out, prefix+suffix)
}

}

@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"text/template"

@@ -18,6 +17,7 @@ import (
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/skopeo/cmd/skopeo/inspect"
"github.com/docker/distribution/registry/api/errcode"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"

@@ -26,7 +26,7 @@ import (
type inspectOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
retryOpts *retry.Options
format string
raw bool // Output the raw manifest instead of parsing information about the image
config bool // Output the raw config blob instead of parsing information about the image

@@ -96,7 +96,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
return err
}

if err := retry.RetryIfNecessary(ctx, func() error {
if err := retry.IfNecessary(ctx, func() error {
src, err = parseImageSource(ctx, opts.image, imageName)
return err
}, opts.retryOpts); err != nil {

@@ -109,7 +109,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
}()

if err := retry.RetryIfNecessary(ctx, func() error {
if err := retry.IfNecessary(ctx, func() error {
rawManifest, _, err = src.GetManifest(ctx, nil)
return err
}, opts.retryOpts); err != nil {

@@ -132,7 +132,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)

if opts.config && opts.raw {
var configBlob []byte
if err := retry.RetryIfNecessary(ctx, func() error {
if err := retry.IfNecessary(ctx, func() error {
configBlob, err = img.ConfigBlob(ctx)
return err
}, opts.retryOpts); err != nil {

@@ -145,7 +145,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
return nil
} else if opts.config {
var config *v1.Image
if err := retry.RetryIfNecessary(ctx, func() error {
if err := retry.IfNecessary(ctx, func() error {
config, err = img.OCIConfig(ctx)
return err
}, opts.retryOpts); err != nil {

@@ -160,7 +160,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
} else {
row := "{{range . }}" + report.NormalizeFormat(opts.format) + "{{end}}"
data = append(data, config)
err = printTmpl(row, data)
err = printTmpl(stdout, row, data)
}
if err != nil {
return fmt.Errorf("Error writing OCI-formatted configuration data to standard output: %w", err)

@@ -168,7 +168,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
return nil
}

if err := retry.RetryIfNecessary(ctx, func() error {
if err := retry.IfNecessary(ctx, func() error {
imgInspect, err = img.Inspect(ctx)
return err
}, opts.retryOpts); err != nil {

@@ -186,6 +186,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
Architecture: imgInspect.Architecture,
Os: imgInspect.Os,
Layers: imgInspect.Layers,
LayersData: imgInspect.LayersData,
Env: imgInspect.Env,
}
outputData.Digest, err = manifest.Digest(rawManifest)

@@ -202,12 +203,26 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
outputData.RepoTags, err = docker.GetRepositoryTags(ctx, sys, img.Reference())
if err != nil {
// some registries may decide to block the "list all tags" endpoint
// gracefully allow the inspect to continue in this case. Currently
// the IBM Bluemix container registry has this restriction.
// In addition, AWS ECR rejects it with 403 (Forbidden) if the "ecr:ListImages"
// action is not allowed.
if !strings.Contains(err.Error(), "401") && !strings.Contains(err.Error(), "403") {
// Some registries may decide to block the "list all tags" endpoint;
// gracefully allow the inspect to continue in this case:
fatalFailure := true
// - AWS ECR rejects it if the "ecr:ListImages" action is not allowed.
// https://github.com/containers/skopeo/issues/726
var ec errcode.ErrorCoder
if ok := errors.As(err, &ec); ok && ec.ErrorCode() == errcode.ErrorCodeDenied {
fatalFailure = false
}
// - public.ecr.aws does not implement the endpoint at all, and fails with 404:
// https://github.com/containers/skopeo/issues/1230
// This is actually "code":"NOT_FOUND", and the parser doesn’t preserve that.
// So, also check the error text.
if ok := errors.As(err, &ec); ok && ec.ErrorCode() == errcode.ErrorCodeUnknown {
var e errcode.Error
if ok := errors.As(err, &e); ok && e.Code == errcode.ErrorCodeUnknown && e.Message == "404 page not found" {
fatalFailure = false
}
}
if fatalFailure {
return fmt.Errorf("Error determining repository tags: %w", err)
}
logrus.Warnf("Registry disallows tag list retrieval; skipping")

@@ -222,14 +237,14 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
row := "{{range . }}" + report.NormalizeFormat(opts.format) + "{{end}}"
data = append(data, outputData)
return printTmpl(row, data)
return printTmpl(stdout, row, data)
}

func printTmpl(row string, data []interface{}) error {
func printTmpl(stdout io.Writer, row string, data []interface{}) error {
t, err := template.New("skopeo inspect").Parse(row)
if err != nil {
return err
}
w := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', 0)
w := tabwriter.NewWriter(stdout, 8, 2, 2, ' ', 0)
return t.Execute(w, data)
}

@@ -3,6 +3,7 @@ package inspect
import (
"time"

"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)

@@ -19,5 +20,6 @@ type Output struct {
Architecture string
Os string
Layers []string
LayersData []types.ImageInspectLayer
Env []string
}

@@ -19,7 +19,7 @@ import (
type layersOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
retryOpts *retry.Options
}

func layersCmd(global *globalOptions) *cobra.Command {

@@ -68,13 +68,13 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
rawSource types.ImageSource
src types.ImageCloser
)
if err = retry.RetryIfNecessary(ctx, func() error {
if err = retry.IfNecessary(ctx, func() error {
rawSource, err = parseImageSource(ctx, opts.image, imageName)
return err
}, opts.retryOpts); err != nil {
return err
}
if err = retry.RetryIfNecessary(ctx, func() error {
if err = retry.IfNecessary(ctx, func() error {
src, err = image.FromSource(ctx, sys, rawSource)
return err
}, opts.retryOpts); err != nil {

@@ -145,7 +145,7 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
r io.ReadCloser
blobSize int64
)
if err = retry.RetryIfNecessary(ctx, func() error {
if err = retry.IfNecessary(ctx, func() error {
r, blobSize, err = rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
return err
}, opts.retryOpts); err != nil {

@@ -160,7 +160,7 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
}

var manifest []byte
if err = retry.RetryIfNecessary(ctx, func() error {
if err = retry.IfNecessary(ctx, func() error {
manifest, _, err = src.Manifest(ctx)
return err
}, opts.retryOpts); err != nil {

@@ -27,7 +27,7 @@ type tagListOutput struct {
type tagsOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
retryOpts *retry.Options
}

var transportHandlers = map[string]func(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error){

@@ -120,7 +120,7 @@ func listDockerRepoTags(ctx context.Context, sys *types.SystemContext, opts *tag
if err != nil {
return
}
if err = retry.RetryIfNecessary(ctx, func() error {
if err = retry.IfNecessary(ctx, func() error {
repositoryName, tagListing, err = listDockerTags(ctx, sys, imgRef)
return err
}, opts.retryOpts); err != nil {

@@ -98,6 +98,7 @@ func createApp() (*cobra.Command, *globalOptions) {
rootCommand.AddCommand(
copyCmd(&opts),
deleteCmd(&opts),
generateSigstoreKeyCmd(),
inspectCmd(&opts),
layersCmd(&opts),
loginCmd(&opts),

@@ -73,11 +73,16 @@ import (

"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
ocilayout "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/pkg/blobinfocache"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
dockerdistributionerrcode "github.com/docker/distribution/registry/api/errcode"
dockerdistributionapi "github.com/docker/distribution/registry/api/v2"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

@@ -88,7 +93,9 @@ import (
// 0.2.1: Initial version
// 0.2.2: Added support for fetching image configuration as OCI
// 0.2.3: Added GetFullConfig
const protocolVersion = "0.2.3"
// 0.2.4: Added OpenImageOptional
// 0.2.5: Added LayerInfoJSON
const protocolVersion = "0.2.5"

// maxMsgSize is the current limit on a packet size.
// Note that all non-metadata (i.e. payload data) is sent over a pipe.

@@ -100,6 +107,9 @@ const maxMsgSize = 32 * 1024
// integers are above this.
const maxJSONFloat = float64(uint64(1)<<53 - 1)

// sentinelImageID represents "image not found" on the wire
const sentinelImageID = 0

// request is the JSON serialization of a function call
type request struct {
// Method is the name of the function

@@ -166,6 +176,14 @@ type proxyHandler struct {
activePipes map[uint32]*activePipe
}

// convertedLayerInfo is the reduced form of the OCI type BlobInfo
// Used in the return value of GetLayerInfo
type convertedLayerInfo struct {
Digest digest.Digest `json:"digest"`
Size int64 `json:"size"`
MediaType string `json:"media_type"`
}

// Initialize performs one-time initialization, and returns the protocol version
func (h *proxyHandler) Initialize(args []interface{}) (replyBuf, error) {
h.lock.Lock()

@@ -197,6 +215,29 @@ func (h *proxyHandler) Initialize(args []interface{}) (replyBuf, error) {
// OpenImage accepts a string image reference i.e. TRANSPORT:REF - like `skopeo copy`.
// The return value is an opaque integer handle.
func (h *proxyHandler) OpenImage(args []interface{}) (replyBuf, error) {
return h.openImageImpl(args, false)
}

// isDockerManifestUnknownError is a copy of code from containers/image,
// please update there first.
func isDockerManifestUnknownError(err error) bool {
var ec dockerdistributionerrcode.ErrorCoder
if !errors.As(err, &ec) {
return false
}
return ec.ErrorCode() == dockerdistributionapi.ErrorCodeManifestUnknown
}

// isNotFoundImageError heuristically attempts to determine whether an error
// is saying the remote source couldn't find the image (as opposed to an
// authentication error, an I/O error etc.)
// TODO drive this into containers/image properly
func isNotFoundImageError(err error) bool {
return isDockerManifestUnknownError(err) ||
errors.Is(err, ocilayout.ImageNotFoundError{})
}

func (h *proxyHandler) openImageImpl(args []interface{}, allowNotFound bool) (replyBuf, error) {
h.lock.Lock()
defer h.lock.Unlock()
var ret replyBuf

@@ -218,9 +259,15 @@ func (h *proxyHandler) OpenImage(args []interface{}) (replyBuf, error) {
}
imgsrc, err := imgRef.NewImageSource(context.Background(), h.sysctx)
if err != nil {
if allowNotFound && isNotFoundImageError(err) {
ret.value = sentinelImageID
return ret, nil
}
return ret, err
}

// Note that we never return zero as an imageid; this code doesn't yet
// handle overflow though.
h.imageSerial++
openimg := &openImage{
id: h.imageSerial,

@@ -232,6 +279,13 @@ func (h *proxyHandler) OpenImage(args []interface{}) (replyBuf, error) {
return ret, nil
}

// OpenImage accepts a string image reference i.e. TRANSPORT:REF - like `skopeo copy`.
// The return value is an opaque integer handle. If the image does not exist, zero
// is returned.
func (h *proxyHandler) OpenImageOptional(args []interface{}) (replyBuf, error) {
return h.openImageImpl(args, true)
}
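For orientation, a client-side sketch of how a caller might drive `OpenImageOptional` over this protocol. The `Method`/`Args` request shape, the method name, and the zero-handle `sentinelImageID` convention come from the code above; the wire field names (`method`, `args`, `value`) and the `proxyRequest`/`proxyReply` type names are assumptions for illustration, not the proxy's actual schema:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// proxyRequest mirrors the server-side request type above: a method name plus
// positional arguments. (The JSON field names here are assumed.)
type proxyRequest struct {
	Method string        `json:"method"`
	Args   []interface{} `json:"args"`
}

// proxyReply is an assumed shape for the reply; the real proxy also carries
// error information and pipe IDs for payload data.
type proxyReply struct {
	Value interface{} `json:"value"`
}

func main() {
	req := proxyRequest{Method: "OpenImageOptional", Args: []interface{}{"docker://quay.io/example/missing:latest"}}
	buf, _ := json.Marshal(req)
	fmt.Printf("request: %s\n", buf)

	// Pretend the proxy answered; a zero handle is the sentinelImageID,
	// meaning "image not found" rather than an error.
	reply := proxyReply{Value: float64(0)}
	if id, ok := reply.Value.(float64); ok && id == 0 {
		fmt.Println("image not found (sentinel handle 0)")
	}
}
```

In the real proxy the reply travels back over the same Unix socket, while payload data is streamed separately over a pipe, as the `maxMsgSize` comment above notes.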
func (h *proxyHandler) CloseImage(args []interface{}) (replyBuf, error) {
h.lock.Lock()
defer h.lock.Unlock()

@@ -278,6 +332,9 @@ func (h *proxyHandler) parseImageFromID(v interface{}) (*openImage, error) {
if err != nil {
return nil, err
}
if imgid == sentinelImageID {
return nil, fmt.Errorf("Invalid imageid value of zero")
}
imgref, ok := h.images[imgid]
if !ok {
return nil, fmt.Errorf("no image %v", imgid)

@@ -542,10 +599,12 @@ func (h *proxyHandler) GetBlob(args []interface{}) (replyBuf, error) {

piper, f, err := h.allocPipe()
if err != nil {
blobr.Close()
return ret, err
}
go func() {
// Signal completion when we return
defer blobr.Close()
defer f.wg.Done()
verifier := d.Verifier()
tr := io.TeeReader(blobr, verifier)

@@ -568,6 +627,56 @@ func (h *proxyHandler) GetBlob(args []interface{}) (replyBuf, error) {
return ret, nil
}

// GetLayerInfo returns data about the layers of an image, useful for reading the layer contents.
//
// This needs to be called since the data returned by GetManifest() does not allow to correctly
// calling GetBlob() for the containers-storage: transport (which doesn’t store the original compressed
// representations referenced in the manifest).
func (h *proxyHandler) GetLayerInfo(args []interface{}) (replyBuf, error) {
h.lock.Lock()
defer h.lock.Unlock()

var ret replyBuf

if h.sysctx == nil {
return ret, fmt.Errorf("client error: must invoke Initialize")
}

if len(args) != 1 {
return ret, fmt.Errorf("found %d args, expecting (imgid)", len(args))
}

imgref, err := h.parseImageFromID(args[0])
if err != nil {
return ret, err
}

ctx := context.TODO()

err = h.cacheTargetManifest(imgref)
if err != nil {
return ret, err
}
img := imgref.cachedimg

layerInfos, err := img.LayerInfosForCopy(ctx)
if err != nil {
return ret, err
}

if layerInfos == nil {
layerInfos = img.LayerInfos()
}

var layers []convertedLayerInfo
for _, layer := range layerInfos {
layers = append(layers, convertedLayerInfo{layer.Digest, layer.Size, layer.MediaType})
}

ret.value = layers
return ret, nil
}
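The reply value of `GetLayerInfo` is the slice of `convertedLayerInfo` records defined earlier, so a caller can decode it using the `digest`, `size`, and `media_type` keys from those struct tags. A minimal decoding sketch, assuming a JSON-serialized reply; the sample payload reuses the layer digest and size from the README `skopeo inspect` example above rather than real captured output:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// layerInfo matches the json tags of convertedLayerInfo above.
type layerInfo struct {
	Digest    string `json:"digest"`
	Size      int64  `json:"size"`
	MediaType string `json:"media_type"`
}

func main() {
	// Illustrative reply payload; real digests and sizes come from the opened image.
	sample := `[{"digest":"sha256:2a0fc6bf62e155737f0ace6142ee686f3c471c1aab4241dc3128904db46288f0","size":71355009,"media_type":"application/vnd.docker.image.rootfs.diff.tar.gzip"}]`

	var layers []layerInfo
	if err := json.Unmarshal([]byte(sample), &layers); err != nil {
		panic(err)
	}
	for _, l := range layers {
		// Each entry is enough to issue a GetBlob call for that layer.
		fmt.Printf("%s %d %s\n", l.MediaType, l.Size, l.Digest)
	}
}
```

Each decoded entry gives a caller enough to request the corresponding layer with `GetBlob`, which is the point of adding this call for the containers-storage: transport.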
|
||||
|
||||
// FinishPipe waits for the worker goroutine to finish, and closes the write side of the pipe.
|
||||
func (h *proxyHandler) FinishPipe(args []interface{}) (replyBuf, error) {
|
||||
h.lock.Lock()
|
||||
@@ -596,6 +705,17 @@ func (h *proxyHandler) FinishPipe(args []interface{}) (replyBuf, error) {
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// close releases all resources associated with this proxy backend
|
||||
func (h *proxyHandler) close() {
|
||||
for _, image := range h.images {
|
||||
err := image.src.Close()
|
||||
if err != nil {
|
||||
// This shouldn't be fatal
|
||||
logrus.Warnf("Failed to close image %s: %v", transports.ImageName(image.cachedimg.Reference()), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// send writes a reply buffer to the socket
|
||||
func (buf replyBuf) send(conn *net.UnixConn, err error) error {
|
||||
replyToSerialize := reply{
|
||||
@@ -678,6 +798,8 @@ func (h *proxyHandler) processRequest(readBytes []byte) (rb replyBuf, terminate
|
||||
rb, err = h.Initialize(req.Args)
|
||||
case "OpenImage":
|
||||
rb, err = h.OpenImage(req.Args)
|
||||
case "OpenImageOptional":
|
||||
rb, err = h.OpenImageOptional(req.Args)
|
||||
case "CloseImage":
|
||||
rb, err = h.CloseImage(req.Args)
|
||||
case "GetManifest":
|
||||
@@ -688,10 +810,14 @@ func (h *proxyHandler) processRequest(readBytes []byte) (rb replyBuf, terminate
|
||||
rb, err = h.GetFullConfig(req.Args)
|
||||
case "GetBlob":
|
||||
rb, err = h.GetBlob(req.Args)
|
||||
case "GetLayerInfo":
|
||||
rb, err = h.GetLayerInfo(req.Args)
|
||||
case "FinishPipe":
|
||||
rb, err = h.FinishPipe(req.Args)
|
||||
case "Shutdown":
|
||||
terminate = true
|
||||
// NOTE: If you add a method here, you should very likely be bumping the
|
||||
// const protocolVersion above.
|
||||
default:
|
||||
err = fmt.Errorf("unknown method: %s", req.Method)
|
||||
}
|
||||
@@ -705,6 +831,7 @@ func (opts *proxyOptions) run(args []string, stdout io.Writer) error {
|
||||
images: make(map[uint32]*openImage),
|
||||
activePipes: make(map[uint32]*activePipe),
|
||||
}
|
||||
defer handler.close()
|
||||
|
||||
// Convert the socket FD passed by client into a net.FileConn
|
||||
fd := os.NewFile(uintptr(opts.sockFd), "sock")
|
||||
|
||||
@@ -19,6 +19,8 @@ import (
|
||||
"github.com/containers/image/v5/docker"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/pkg/cli"
|
||||
"github.com/containers/image/v5/pkg/cli/sigstore"
|
||||
"github.com/containers/image/v5/signature/signer"
|
||||
"github.com/containers/image/v5/transports"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
@@ -33,9 +35,10 @@ type syncOptions struct {
|
||||
deprecatedTLSVerify *deprecatedTLSVerifyOption
|
||||
srcImage *imageOptions // Source image options
|
||||
destImage *imageDestOptions // Destination image options
|
||||
retryOpts *retry.RetryOptions
|
||||
retryOpts *retry.Options
|
||||
removeSignatures bool // Do not copy signatures from the source image
|
||||
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
|
||||
signBySigstoreParamFile string // Sign the image using a sigstore signature per configuration in a param file
|
||||
signBySigstorePrivateKey string // Sign the image using a sigstore private key
|
||||
signPassphraseFile string // Path pointing to a passphrase file when signing
|
||||
format commonFlag.OptionalString // Force conversion of the image to a specified format
|
||||
@@ -46,6 +49,7 @@ type syncOptions struct {
|
||||
dryRun bool // Don't actually copy anything, just output what it would have done
|
||||
preserveDigests bool // Preserve digests during sync
|
||||
keepGoing bool // Whether to continue syncing the remaining images (rather than aborting) if an error occurs
|
||||
appendSuffix string // Suffix to append to destination image tag
|
||||
}
|
||||
|
||||
// repoDescriptor contains information of a single repository used as a sync source.
|
||||
@@ -106,12 +110,14 @@ See skopeo-sync(1) for details.
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE images")
|
||||
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
|
||||
flags.StringVar(&opts.signBySigstoreParamFile, "sign-by-sigstore", "", "Sign the image using a sigstore parameter file at `PATH`")
|
||||
flags.StringVar(&opts.signBySigstorePrivateKey, "sign-by-sigstore-private-key", "", "Sign the image using a sigstore private key at `PATH`")
|
||||
flags.StringVar(&opts.signPassphraseFile, "sign-passphrase-file", "", "File that contains a passphrase for the --sign-by key")
|
||||
flags.VarP(commonFlag.NewOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source, with fallbacks)`)
|
||||
flags.StringVarP(&opts.source, "src", "s", "", "SOURCE transport type")
|
||||
flags.StringVarP(&opts.destination, "dest", "d", "", "DESTINATION transport type")
|
||||
flags.BoolVar(&opts.scoped, "scoped", false, "Images at DESTINATION are prefixed using the full source image path as scope")
|
||||
flags.StringVar(&opts.appendSuffix, "append-suffix", "", "String to append to DESTINATION tags")
|
||||
flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
|
||||
flags.BoolVar(&opts.dryRun, "dry-run", false, "Run without actually copying data")
|
||||
flags.BoolVar(&opts.preserveDigests, "preserve-digests", false, "Preserve digests of images and lists")
|
||||
@@ -216,15 +222,7 @@ func getImageTags(ctx context.Context, sysCtx *types.SystemContext, repoRef refe
|
||||
}
|
||||
tags, err := docker.GetRepositoryTags(ctx, sysCtx, dockerRef)
|
||||
if err != nil {
|
||||
var unauthorizedForCredentials docker.ErrUnauthorizedForCredentials
|
||||
if errors.As(err, &unauthorizedForCredentials) {
|
||||
// Some registries may decide to block the "list all tags" endpoint.
|
||||
// Gracefully allow the sync to continue in this case.
|
||||
logrus.Warnf("Registry disallows tag list retrieval: %s", err)
|
||||
tags = nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("Error determining repository tags for image %s: %w", name, err)
|
||||
}
|
||||
return nil, fmt.Errorf("Error determining repository tags for repo %s: %w", name, err)
|
||||
}
|
||||
|
||||
return tags, nil
|
||||
@@ -244,7 +242,11 @@ func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]
|
||||
for _, tag := range tags {
|
||||
taggedRef, err := reference.WithTag(repoRef, tag)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error creating a reference for repository %s and tag %q: %w", repoRef.Name(), tag, err)
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"repo": repoRef.Name(),
|
||||
"tag": tag,
|
||||
}).Errorf("Error creating a tagged reference from registry tag list: %v", err)
|
||||
continue
|
||||
}
|
||||
ref, err := docker.NewReference(taggedRef)
|
||||
if err != nil {
|
||||
@@ -548,6 +550,8 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
|
||||
return errors.New("sync from 'dir' to 'dir' not implemented, consider using rsync instead")
|
||||
}
|
||||
|
||||
opts.destImage.warnAboutIneffectiveOptions(transports.Get(opts.destination))
|
||||
|
||||
imageListSelection := copy.CopySystemImage
|
||||
if opts.all {
|
||||
imageListSelection = copy.CopyAllImages
|
||||
@@ -571,7 +575,7 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
|
||||
|
||||
sourceArg := args[0]
|
||||
var srcRepoList []repoDescriptor
|
||||
if err = retry.RetryIfNecessary(ctx, func() error {
|
||||
if err = retry.IfNecessary(ctx, func() error {
|
||||
srcRepoList, err = imagesToCopy(sourceArg, opts.source, sourceCtx)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
@@ -604,13 +608,31 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
|
||||
}
|
||||
passphrase = p
|
||||
}
|
||||
|
||||
var signers []*signer.Signer
|
||||
if opts.signBySigstoreParamFile != "" {
|
||||
signer, err := sigstore.NewSignerFromParameterFile(opts.signBySigstoreParamFile, &sigstore.Options{
|
||||
PrivateKeyPassphrasePrompt: func(keyFile string) (string, error) {
|
||||
return promptForPassphrase(keyFile, os.Stdin, os.Stdout)
|
||||
},
|
||||
Stdin: os.Stdin,
|
||||
Stdout: stdout,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error using --sign-by-sigstore: %w", err)
|
||||
}
|
||||
defer signer.Close()
|
||||
signers = append(signers, signer)
|
||||
}
|
||||
|
||||
options := copy.Options{
|
||||
RemoveSignatures: opts.removeSignatures,
|
||||
Signers: signers,
|
||||
SignBy: opts.signByFingerprint,
|
||||
SignPassphrase: passphrase,
|
||||
SignBySigstorePrivateKeyFile: opts.signBySigstorePrivateKey,
|
||||
SignSigstorePrivateKeyPassphrase: []byte(passphrase),
|
||||
ReportWriter: os.Stdout,
|
||||
ReportWriter: stdout,
|
||||
DestinationCtx: destinationCtx,
|
||||
ImageListSelection: imageListSelection,
|
||||
PreserveDigests: opts.preserveDigests,
|
||||
@@ -644,7 +666,7 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
|
||||
destSuffix = path.Base(destSuffix)
|
||||
}
|
||||
|
||||
destRef, err := destinationReference(path.Join(destination, destSuffix), opts.destination)
|
||||
destRef, err := destinationReference(path.Join(destination, destSuffix)+opts.appendSuffix, opts.destination)
|
||||
if err != nil {
|
||||
return err
|
||||
}
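The changed line above composes the destination reference as `path.Join(destination, destSuffix) + appendSuffix`. A tiny runnable sketch of that composition, with made-up values that mirror the skopeo-sync(1) example output:

```go
package main

import (
	"fmt"
	"path"
)

// Demonstrates how --append-suffix is appended after joining the destination
// and the source-derived suffix. Values are illustrative only.
func main() {
	destination := "my-registry.local.lan"
	destSuffix := "busybox:1-glibc"
	appendSuffix := "-mirror"
	fmt.Println(path.Join(destination, destSuffix) + appendSuffix)
	// Output: my-registry.local.lan/busybox:1-glibc-mirror
}
```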
|
||||
@@ -657,7 +679,7 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
|
||||
logrus.WithFields(fromToFields).Infof("Would have copied image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
|
||||
} else {
|
||||
logrus.WithFields(fromToFields).Infof("Copying image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
|
||||
if err = retry.RetryIfNecessary(ctx, func() error {
|
||||
if err = retry.IfNecessary(ctx, func() error {
|
||||
_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
commonFlag "github.com/containers/common/pkg/flag"
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/directory"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
"github.com/containers/image/v5/transports/alltransports"
|
||||
@@ -30,11 +31,11 @@ type errorShouldDisplayUsage struct {
|
||||
// The error for closeErr is annotated with description (which is not a format string)
|
||||
// Typical usage:
|
||||
//
|
||||
// defer func() {
|
||||
// if err := something.Close(); err != nil {
|
||||
// returnedErr = noteCloseFailure(returnedErr, "closing something", err)
|
||||
// }
|
||||
// }
|
||||
// defer func() {
|
||||
// if err := something.Close(); err != nil {
|
||||
// returnedErr = noteCloseFailure(returnedErr, "closing something", err)
|
||||
// }
|
||||
// }
|
||||
func noteCloseFailure(err error, description string, closeErr error) error {
|
||||
// We don’t accept a Closer() and close it ourselves because signature.PolicyContext has .Destroy(), not .Close().
|
||||
// This also makes it harder for a caller to do
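As a concrete illustration of the defer pattern described in the comment above, here is a self-contained sketch with a placeholder file closer; the body of `noteCloseFailure` below is a plausible reading of the documented semantics, not necessarily skopeo's exact implementation.

```go
package example

import (
	"fmt"
	"io"
	"os"
)

// noteCloseFailure combines a primary error with a failure reported while
// closing a resource. Sketch of the semantics described above; the real
// implementation may differ.
func noteCloseFailure(err error, description string, closeErr error) error {
	if err == nil {
		return fmt.Errorf("%s: %w", description, closeErr)
	}
	return fmt.Errorf("%w (%s: %v)", err, description, closeErr)
}

// copyFile shows the typical usage from the doc comment: a named return value
// so a Close error is not silently dropped.
func copyFile(dst io.Writer, path string) (retErr error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer func() {
		if err := f.Close(); err != nil {
			retErr = noteCloseFailure(retErr, "closing "+path, err)
		}
	}()
	_, err = io.Copy(dst, f)
	return err
}
```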
|
||||
@@ -174,8 +175,8 @@ func imageFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLS
|
||||
return fs, opts
|
||||
}
|
||||
|
||||
func retryFlags() (pflag.FlagSet, *retry.RetryOptions) {
|
||||
opts := retry.RetryOptions{}
|
||||
func retryFlags() (pflag.FlagSet, *retry.Options) {
|
||||
opts := retry.Options{}
|
||||
fs := pflag.FlagSet{}
|
||||
fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
|
||||
return fs, &opts
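The hunks in this diff track the retry API rename in containers/common (`retry.RetryOptions` → `retry.Options`, `retry.RetryIfNecessary` → `retry.IfNecessary`). A minimal sketch of the new call shape, consistent with how the sync code above uses it; the operation body is a placeholder:

```go
package example

import (
	"context"

	"github.com/containers/common/pkg/retry"
)

// listWithRetry wraps a network operation with the renamed retry API.
func listWithRetry(ctx context.Context, maxRetry int) error {
	opts := &retry.Options{MaxRetry: maxRetry}
	return retry.IfNecessary(ctx, func() error {
		// e.g. docker.GetRepositoryTags(...) or copy.Image(...) would go here.
		return nil
	}, opts)
}
```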
|
||||
@@ -244,6 +245,7 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
|
||||
}
|
||||
|
||||
// imageDestOptions is a superset of imageOptions specialized for image destinations.
|
||||
// Every user should call imageDestOptions.warnAboutIneffectiveOptions() as part of handling the CLI
|
||||
type imageDestOptions struct {
|
||||
*imageOptions
|
||||
dirForceCompression bool // Compress layers when saving to the dir: transport
|
||||
@@ -252,12 +254,13 @@ type imageDestOptions struct {
|
||||
compressionFormat string // Format to use for the compression
|
||||
compressionLevel commonFlag.OptionalInt // Level to use for the compression
|
||||
precomputeDigests bool // Precompute digests to dedup layers when saving to the docker: transport
|
||||
imageDestFlagPrefix string
|
||||
}
|
||||
|
||||
// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
|
||||
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageDestOptions) {
|
||||
genericFlags, genericOptions := imageFlags(global, shared, deprecatedTLSVerify, flagPrefix, credsOptionAlias)
|
||||
opts := imageDestOptions{imageOptions: genericOptions}
|
||||
opts := imageDestOptions{imageOptions: genericOptions, imageDestFlagPrefix: flagPrefix}
|
||||
fs := pflag.FlagSet{}
|
||||
fs.AddFlagSet(&genericFlags)
|
||||
fs.BoolVar(&opts.dirForceCompression, flagPrefix+"compress", false, "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
|
||||
@@ -295,6 +298,19 @@ func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
|
||||
return ctx, err
|
||||
}
|
||||
|
||||
// warnAboutIneffectiveOptions warns if any ineffective option was set by the user
|
||||
// Every user should call this as part of handling the CLI
|
||||
func (opts *imageDestOptions) warnAboutIneffectiveOptions(destTransport types.ImageTransport) {
|
||||
if destTransport.Name() != directory.Transport.Name() {
|
||||
if opts.dirForceCompression {
|
||||
logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"compress")
|
||||
}
|
||||
if opts.dirForceDecompression {
|
||||
logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"decompress")
|
||||
}
|
||||
}
|
||||
}
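For reference, the sync command in this diff calls this right after resolving the destination transport (`opts.destImage.warnAboutIneffectiveOptions(transports.Get(opts.destination))`). A simplified, self-contained stand-in for the check, with illustrative flag names rather than skopeo's actual option structs:

```go
package example

import (
	"github.com/containers/image/v5/directory"
	"github.com/containers/image/v5/transports"
	"github.com/sirupsen/logrus"
)

// warnIfIneffective warns when dir-only flags were set but the destination
// transport is not "dir". Simplified sketch; not skopeo's actual types.
func warnIfIneffective(destTransportName string, compress, decompress bool) {
	t := transports.Get(destTransportName)
	if t == nil || t.Name() == directory.Transport.Name() {
		return
	}
	if compress {
		logrus.Warn("--compress can only be used if the destination transport is 'dir'")
	}
	if decompress {
		logrus.Warn("--decompress can only be used if the destination transport is 'dir'")
	}
}
```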
|
||||
|
||||
func parseCreds(creds string) (string, string, error) {
|
||||
if creds == "" {
|
||||
return "", "", errors.New("credentials can't be empty")
|
||||
|
||||
contrib/cirrus/ostree_ext.dockerfile (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
ARG BASE_FQIN=quay.io/coreos-assembler/fcos-buildroot:testing-devel
|
||||
FROM $BASE_FQIN
|
||||
|
||||
# See 'Danger of using COPY and ADD instructions'
|
||||
# at https://cirrus-ci.org/guide/docker-builder-vm/#dockerfile-as-a-ci-environment
|
||||
# Provide easy way to force-invalidate image cache by .cirrus.yml change
|
||||
ARG CIRRUS_IMAGE_VERSION
|
||||
ENV CIRRUS_IMAGE_VERSION=$CIRRUS_IMAGE_VERSION
|
||||
ADD https://sh.rustup.rs /var/tmp/rustup_installer.sh
|
||||
|
||||
RUN dnf erase -y rust && \
|
||||
chmod +x /var/tmp/rustup_installer.sh && \
|
||||
/var/tmp/rustup_installer.sh -y --default-toolchain stable --profile minimal
|
||||
|
||||
ENV PATH=/root/.cargo/bin:/root/.local/bin:/root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
@@ -6,6 +6,17 @@
|
||||
|
||||
set -e
|
||||
|
||||
_EOL=20270501
|
||||
if [[ $(date +%Y%m%d) -ge $_EOL ]]; then
|
||||
die "As of $_EOL this branch is probably
|
||||
no longer supported in RHEL 9.2/8.8, please
|
||||
confirm this with RHEL Program Management. If so:
|
||||
It should be removed from Cirrus-Cron,
|
||||
the .cirrus.yml file removed, and
|
||||
the VM images (manually) unmarked
|
||||
'permanent=true'"
|
||||
fi
|
||||
|
||||
# BEGIN Global export of all variables
|
||||
set -a
|
||||
|
||||
@@ -55,9 +66,6 @@ _run_setup() {
|
||||
# VMs come with the distro skopeo package pre-installed
|
||||
dnf erase -y skopeo
|
||||
|
||||
# Required for testing the SIF transport
|
||||
dnf install -y fakeroot squashfs-tools
|
||||
|
||||
msg "Removing systemd-resolved from nsswitch.conf"
|
||||
# /etc/resolv.conf is already set to bypass systemd-resolvd
|
||||
sed -i -r -e 's/^(hosts.+)resolve.+dns/\1dns/' /etc/nsswitch.conf
|
||||
@@ -115,18 +123,19 @@ _run_unit() {
|
||||
make test-unit-local BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
_run_integration() {
|
||||
_podman_reset() {
|
||||
# Ensure we start with a clean-slate
|
||||
podman system reset --force
|
||||
showrun podman system reset --force
|
||||
}
|
||||
|
||||
_run_integration() {
|
||||
_podman_reset
|
||||
make test-integration-local BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
_run_system() {
|
||||
# Ensure we start with a clean-slate
|
||||
podman system reset --force
|
||||
|
||||
# Executes with containers required for testing.
|
||||
_podman_reset
|
||||
##### Note: Test MODIFIES THE HOST SETUP #####
|
||||
make test-system-local BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,17 @@
|
||||
<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">
|
||||
[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
|
||||
[comment]: <> ()
|
||||
[comment]: <> (ANY changes made to this file, once committed/merged must)
|
||||
[comment]: <> (be manually copy/pasted -in markdown- into the description)
|
||||
[comment]: <> (field on Quay at the following locations:)
|
||||
[comment]: <> ()
|
||||
[comment]: <> (https://quay.io/repository/containers/skopeo)
|
||||
[comment]: <> (https://quay.io/repository/skopeo/stable)
|
||||
[comment]: <> (https://quay.io/repository/skopeo/testing)
|
||||
[comment]: <> (https://quay.io/repository/skopeo/upstream)
|
||||
[comment]: <> ()
|
||||
[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
|
||||
|
||||
<img src="https://cdn.rawgit.com/containers/skopeo/main/docs/skopeo.svg" width="250">
|
||||
|
||||
----
@@ -16,27 +16,11 @@ FROM registry.fedoraproject.org/fedora:latest
|
||||
# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
|
||||
RUN dnf -y update && \
|
||||
rpm --setcaps shadow-utils 2>/dev/null && \
|
||||
dnf -y --enablerepo updates-testing --exclude container-selinux install \
|
||||
make \
|
||||
golang \
|
||||
git \
|
||||
go-md2man \
|
||||
fuse-overlayfs \
|
||||
fuse3 \
|
||||
containers-common \
|
||||
gpgme-devel \
|
||||
libassuan-devel \
|
||||
btrfs-progs-devel \
|
||||
device-mapper-devel && \
|
||||
mkdir /root/skopeo && \
|
||||
git clone https://github.com/containers/skopeo \
|
||||
/root/skopeo/src/github.com/containers/skopeo && \
|
||||
export GOPATH=/root/skopeo && \
|
||||
cd /root/skopeo/src/github.com/containers/skopeo && \
|
||||
make bin/skopeo && \
|
||||
make PREFIX=/usr install && \
|
||||
rm -rf /root/skopeo/* && \
|
||||
dnf -y remove git golang go-md2man make && \
|
||||
dnf -y install 'dnf-command(copr)' --enablerepo=updates-testing && \
|
||||
dnf -y copr enable rhcontainerbot/podman-next && \
|
||||
dnf -y install skopeo \
|
||||
--exclude container-selinux \
|
||||
--enablerepo=updates-testing && \
|
||||
dnf clean all && \
|
||||
rm -rf /var/cache /var/log/dnf* /var/log/yum.*
|
||||
|
||||
|
||||
default.yaml (14 lines changed)
@@ -1,8 +1,8 @@
|
||||
# This is a default registries.d configuration file. You may
|
||||
# add to this file or create additional files in registries.d/.
|
||||
#
|
||||
# lookaside: indicates a location that is read and write
|
||||
# lookaside-staging: indicates a location that is only for write
|
||||
# lookaside: for reading/writing simple signing signatures
|
||||
# lookaside-staging: for writing simple signing signatures, preferred over lookaside
|
||||
#
|
||||
# lookaside and lookaside-staging take a value of the following:
|
||||
# lookaside: {schema}://location
|
||||
@@ -10,10 +10,12 @@
|
||||
# For reading signatures, schema may be http, https, or file.
|
||||
# For writing signatures, schema may only be file.
|
||||
|
||||
# This is the default signature write location for docker registries.
|
||||
# The default locations are built-in, for both reading and writing:
|
||||
# /var/lib/containers/sigstore for root, or
|
||||
# ~/.local/share/containers/sigstore for non-root users.
|
||||
default-docker:
|
||||
# lookaside: file:///var/lib/containers/sigstore
|
||||
lookaside-staging: file:///var/lib/containers/sigstore
|
||||
# lookaside: https://…
|
||||
# lookaside-staging: file:///…
|
||||
|
||||
# The 'docker' indicator here is the start of the configuration
|
||||
# for docker registries.
|
||||
@@ -21,6 +23,6 @@ default-docker:
|
||||
# docker:
|
||||
#
|
||||
# privateregistry.com:
|
||||
# lookaside: http://privateregistry.com/sigstore/
|
||||
# lookaside: https://privateregistry.com/sigstore/
|
||||
# lookaside-staging: /mnt/nfs/privateregistry/sigstore
|
||||
|
||||
|
||||
@@ -56,7 +56,7 @@ After copying the image, write the digest of the resulting image to the file.
|
||||
|
||||
**--preserve-digests**
|
||||
|
||||
Preserve the digests during copying. Fail if the digest cannot be preserved.
|
||||
Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.
|
||||
|
||||
**--encrypt-layer** _ints_
|
||||
|
||||
@@ -93,6 +93,11 @@ Do not copy signatures, if any, from _source-image_. Necessary when copying a si
|
||||
|
||||
Add a “simple signing” signature using that key ID for an image name corresponding to _destination-image_
|
||||
|
||||
**--sign-by-sigstore** _param-file_
|
||||
|
||||
Add a sigstore signature based on the options in the specified containers sigstore signing parameter file, _param-file_.
|
||||
See containers-sigstore-signing-params.yaml(5) for details about the file format.
|
||||
|
||||
**--sign-by-sigstore-private-key** _path_
|
||||
|
||||
Add a sigstore signature using a private key at _path_ for an image name corresponding to _destination-image_
|
||||
@@ -214,12 +219,12 @@ The password to access the destination registry.
|
||||
## EXAMPLES
|
||||
|
||||
To just copy an image from one registry to another:
|
||||
```sh
|
||||
```console
|
||||
$ skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest
|
||||
```
|
||||
|
||||
To copy the layers of the docker.io busybox image to a local directory:
|
||||
```sh
|
||||
```console
|
||||
$ mkdir -p /var/lib/images/busybox
|
||||
$ skopeo copy docker://busybox:latest dir:/var/lib/images/busybox
|
||||
$ ls /var/lib/images/busybox/*
|
||||
@@ -228,42 +233,46 @@ $ ls /var/lib/images/busybox/*
|
||||
/tmp/busybox/8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
|
||||
```
|
||||
|
||||
To copy and sign an image:
|
||||
To create an archive consumable by `docker load` (but note that using a registry is almost always more efficient):
|
||||
```console
|
||||
$ skopeo copy docker://busybox:latest docker-archive:archive-file.tar:busybox:latest
|
||||
```
|
||||
|
||||
```sh
|
||||
# skopeo copy --sign-by dev@example.com containers-storage:example/busybox:streaming docker://example/busybox:gold
|
||||
To copy and sign an image:
|
||||
```console
|
||||
$ skopeo copy --sign-by dev@example.com containers-storage:example/busybox:streaming docker://example/busybox:gold
|
||||
```
|
||||
|
||||
To encrypt an image:
|
||||
```sh
|
||||
skopeo copy docker://docker.io/library/nginx:1.17.8 oci:local_nginx:1.17.8
|
||||
```console
|
||||
$ skopeo copy docker://docker.io/library/nginx:1.17.8 oci:local_nginx:1.17.8
|
||||
|
||||
openssl genrsa -out private.key 1024
|
||||
openssl rsa -in private.key -pubout > public.key
|
||||
$ openssl genrsa -out private.key 1024
|
||||
$ openssl rsa -in private.key -pubout > public.key
|
||||
|
||||
skopeo copy --encryption-key jwe:./public.key oci:local_nginx:1.17.8 oci:try-encrypt:encrypted
|
||||
$ skopeo copy --encryption-key jwe:./public.key oci:local_nginx:1.17.8 oci:try-encrypt:encrypted
|
||||
```
|
||||
|
||||
To decrypt an image:
|
||||
```sh
|
||||
skopeo copy --decryption-key ./private.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
|
||||
```console
|
||||
$ skopeo copy --decryption-key ./private.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
|
||||
```
|
||||
|
||||
To copy encrypted image without decryption:
|
||||
```sh
|
||||
skopeo copy oci:try-encrypt:encrypted oci:try-encrypt-copy:encrypted
|
||||
```console
|
||||
$ skopeo copy oci:try-encrypt:encrypted oci:try-encrypt-copy:encrypted
|
||||
```
|
||||
|
||||
To decrypt an image that requires more than one key:
|
||||
```sh
|
||||
skopeo copy --decryption-key ./private1.key --decryption-key ./private2.key --decryption-key ./private3.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
|
||||
```console
|
||||
$ skopeo copy --decryption-key ./private1.key --decryption-key ./private2.key --decryption-key ./private3.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
|
||||
```
|
||||
|
||||
Container images can also be partially encrypted by specifying the index of the layer. Layer indices are 0-based, with support for negative indexing; i.e. 0 is the first layer and -1 is the last layer.
|
||||
|
||||
Say the image `docker.io/library/nginx:1.17.8` is made up of 3 layers and we only want to encrypt the 2nd layer:
|
||||
```sh
|
||||
skopeo copy --encryption-key jwe:./public.key --encrypt-layer 1 oci:local_nginx:1.17.8 oci:try-encrypt:encrypted
|
||||
```console
|
||||
$ skopeo copy --encryption-key jwe:./public.key --encrypt-layer 1 oci:local_nginx:1.17.8 oci:try-encrypt:encrypted
|
||||
```
|
||||
|
||||
## SEE ALSO
|
||||
|
||||
@@ -85,7 +85,7 @@ The password to access the registry.
|
||||
## EXAMPLES
|
||||
|
||||
Mark image example/pause for deletion from the registry.example.com registry:
|
||||
```sh
|
||||
```console
|
||||
$ skopeo delete docker://registry.example.com/example/pause:latest
|
||||
```
|
||||
See above for additional details on using the command **delete**.
|
||||
|
||||
docs/skopeo-generate-sigstore-key.1.md (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
% skopeo-generate-sigstore-key(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-generate-sigstore-key - Generate a sigstore public/private key pair.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo generate-sigstore-key** [*options*] **--output-prefix** _prefix_
|
||||
|
||||
## DESCRIPTION
|
||||
|
||||
Generates a public/private key pair suitable for creating sigstore image signatures.
|
||||
The private key is encrypted with a passphrase;
|
||||
if one is not provided using an option, this command prompts for it interactively.
|
||||
|
||||
The private key is written to _prefix_**.private**.
The public key is written to _prefix_**.pub**.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--help**, **-h**
|
||||
|
||||
Print usage statement
|
||||
|
||||
**--output-prefix** _prefix_
|
||||
|
||||
Mandatory.
|
||||
Path prefix for the output keys (_prefix_**.private** and _prefix_**.pub**).
|
||||
|
||||
**--passphrase-file** _path_
|
||||
|
||||
The passphrase to use to encrypt the private key.
|
||||
Only the first line will be read.
|
||||
A passphrase stored in a file is of questionable security if other users can read this file.
|
||||
Do not use this option if at all avoidable.
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
```console
|
||||
$ skopeo generate-sigstore-key --output-prefix mykey
|
||||
```
|
||||
|
||||
# SEE ALSO
|
||||
skopeo(1), skopeo-copy(1), containers-policy.json(5)
|
||||
|
||||
## AUTHORS
|
||||
|
||||
Miloslav Trmač <mitr@redhat.com>
|
||||
@@ -87,74 +87,90 @@ Do not list the available tags from the repository in the output. When `true`, t
|
||||
## EXAMPLES
|
||||
|
||||
To review information for the image fedora from the docker.io registry:
|
||||
```sh
|
||||
```console
|
||||
$ skopeo inspect docker://docker.io/fedora
|
||||
|
||||
{
|
||||
"Name": "docker.io/library/fedora",
|
||||
"Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
|
||||
"Digest": "sha256:f99efcddc4dd6736d8a88cc1ab6722098ec1d77dbf7aed9a7a514fc997ca08e0",
|
||||
"RepoTags": [
|
||||
"20",
|
||||
"21",
|
||||
"22",
|
||||
"23",
|
||||
"24",
|
||||
"heisenbug",
|
||||
"latest",
|
||||
"rawhide"
|
||||
"20",
|
||||
"21",
|
||||
"..."
|
||||
],
|
||||
"Created": "2016-06-20T19:33:43.220526898Z",
|
||||
"DockerVersion": "1.10.3",
|
||||
"Labels": {},
|
||||
"Created": "2022-11-16T07:26:42.618327645Z",
|
||||
"DockerVersion": "20.10.12",
|
||||
"Labels": {
|
||||
"maintainer": "Clement Verna \u003ccverna@fedoraproject.org\u003e"
|
||||
},
|
||||
"Architecture": "amd64",
|
||||
"Os": "linux",
|
||||
"Layers": [
|
||||
"sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
|
||||
"sha256:cb8b1ed77979b894115a983f391465651aa7eb3edd036be4b508eea47271eb93"
|
||||
],
|
||||
"LayersData": [
|
||||
{
|
||||
"MIMEType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"Digest": "sha256:cb8b1ed77979b894115a983f391465651aa7eb3edd036be4b508eea47271eb93",
|
||||
"Size": 65990920,
|
||||
"Annotations": null
|
||||
}
|
||||
],
|
||||
"Env": [
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
||||
"DISTTAG=f37container",
|
||||
"FGC=f37",
|
||||
"FBR=f37"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
To inspect python from the docker.io registry and not show the available tags:
|
||||
```sh
|
||||
```console
|
||||
$ skopeo inspect --no-tags docker://docker.io/library/python
|
||||
{
|
||||
"Name": "docker.io/library/python",
|
||||
"Digest": "sha256:5ca194a80ddff913ea49c8154f38da66a41d2b73028c5cf7e46bc3c1d6fda572",
|
||||
"Digest": "sha256:10fc14aa6ae69f69e4c953cffd9b0964843d8c163950491d2138af891377bc1d",
|
||||
"RepoTags": [],
|
||||
"Created": "2021-10-05T23:40:54.936108045Z",
|
||||
"DockerVersion": "20.10.7",
|
||||
"Created": "2022-11-16T06:55:28.566254104Z",
|
||||
"DockerVersion": "20.10.12",
|
||||
"Labels": null,
|
||||
"Architecture": "amd64",
|
||||
"Os": "linux",
|
||||
"Layers": [
|
||||
"sha256:df5590a8898bedd76f02205dc8caa5cc9863267dbcd8aac038bcd212688c1cc7",
|
||||
"sha256:705bb4cb554eb7751fd21a994f6f32aee582fbe5ea43037db6c43d321763992b",
|
||||
"sha256:519df5fceacdeaadeec563397b1d9f4d7c29c9f6eff879739cab6f0c144f49e1",
|
||||
"sha256:ccc287cbeddc96a0772397ca00ec85482a7b7f9a9fac643bfddd87b932f743db",
|
||||
"sha256:e3f8e6af58ed3a502f0c3c15dce636d9d362a742eb5b67770d0cfcb72f3a9884",
|
||||
"sha256:aebed27b2d86a5a3a2cbe186247911047a7e432b9d17daad8f226597c0ea4276",
|
||||
"sha256:54c32182bdcc3041bf64077428467109a70115888d03f7757dcf614ff6d95ebe",
|
||||
"sha256:cc8b7caedab13af07adf4836e13af2d4e9e54d794129b0fd4c83ece6b1112e86",
|
||||
"sha256:462c3718af1d5cdc050cfba102d06c26f78fe3b738ce2ca2eb248034b1738945"
|
||||
"sha256:a8ca11554fce00d9177da2d76307bdc06df7faeb84529755c648ac4886192ed1",
|
||||
"sha256:e4e46864aba2e62ba7c75965e4aa33ec856ee1b1074dda6b478101c577b63abd",
|
||||
"..."
|
||||
],
|
||||
"LayersData": [
|
||||
{
|
||||
"MIMEType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"Digest": "sha256:a8ca11554fce00d9177da2d76307bdc06df7faeb84529755c648ac4886192ed1",
|
||||
"Size": 55038615,
|
||||
"Annotations": null
|
||||
},
|
||||
{
|
||||
"MIMEType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"Digest": "sha256:e4e46864aba2e62ba7c75965e4aa33ec856ee1b1074dda6b478101c577b63abd",
|
||||
"Size": 5164893,
|
||||
"Annotations": null
|
||||
},
|
||||
"..."
|
||||
],
|
||||
"Env": [
|
||||
"PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
||||
"LANG=C.UTF-8",
|
||||
"GPG_KEY=A035C8C19219BA821ECEA86B64E628F8D684696D",
|
||||
"PYTHON_VERSION=3.10.0",
|
||||
"PYTHON_PIP_VERSION=21.2.4",
|
||||
"PYTHON_SETUPTOOLS_VERSION=57.5.0",
|
||||
"PYTHON_GET_PIP_URL=https://github.com/pypa/get-pip/raw/d781367b97acf0ece7e9e304bf281e99b618bf10/public/get-pip.py",
|
||||
"PYTHON_GET_PIP_SHA256=01249aa3e58ffb3e1686b7141b4e9aac4d398ef4ac3012ed9dff8dd9f685ffe0"
|
||||
"...",
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
```console
|
||||
$ /bin/skopeo inspect --config docker://registry.fedoraproject.org/fedora --format "{{ .Architecture }}"
|
||||
amd64
|
||||
```
|
||||
|
||||
```
|
||||
```console
|
||||
$ /bin/skopeo inspect --format '{{ .Env }}' docker://registry.access.redhat.com/ubi8
|
||||
[PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin container=oci]
|
||||
```
|
||||
|
||||
@@ -79,7 +79,7 @@ This commands refers to repositories using a _transport_`:`_details_ format. The
|
||||
|
||||
### Docker Transport
|
||||
To get the list of tags in the "fedora" repository from the docker.io registry (the repository name expands to "library/fedora" per docker transport canonical form):
|
||||
```sh
|
||||
```console
|
||||
$ skopeo list-tags docker://docker.io/fedora
|
||||
{
|
||||
"Repository": "docker.io/library/fedora",
|
||||
@@ -110,7 +110,7 @@ $ skopeo list-tags docker://docker.io/fedora
|
||||
|
||||
To list the tags in a local host docker/distribution registry on port 5000, in this case for the "fedora" repository:
|
||||
|
||||
```sh
|
||||
```console
|
||||
$ skopeo list-tags docker://localhost:5000/fedora
|
||||
{
|
||||
"Repository": "localhost:5000/fedora",
|
||||
@@ -127,7 +127,7 @@ $ skopeo list-tags docker://localhost:5000/fedora
|
||||
|
||||
To list the tags in a local docker-archive file:
|
||||
|
||||
```sh
|
||||
```console
|
||||
$ skopeo list-tags docker-archive:/tmp/busybox.tar.gz
|
||||
{
|
||||
"Tags": [
|
||||
@@ -138,7 +138,7 @@ $ skopeo list-tags docker-archive:/tmp/busybox.tar.gz
|
||||
|
||||
Also supports more than one tag in an archive:
|
||||
|
||||
```sh
|
||||
```console
|
||||
$ skopeo list-tags docker-archive:/tmp/docker-two-images.tar.gz
|
||||
{
|
||||
"Tags": [
|
||||
@@ -150,7 +150,7 @@ $ skopeo list-tags docker-archive:/tmp/docker-two-images.tar.gz
|
||||
|
||||
The output will include a source-index entry for each untagged image:
|
||||
|
||||
```sh
|
||||
```console
|
||||
$ skopeo list-tags docker-archive:/tmp/four-tags-with-an-untag.tar
|
||||
{
|
||||
"Tags": [
|
||||
|
||||
@@ -57,41 +57,41 @@ Write more detailed information to stdout
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
```
|
||||
```console
|
||||
$ skopeo login docker.io
|
||||
Username: testuser
|
||||
Password:
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
```
|
||||
```console
|
||||
$ skopeo login -u testuser -p testpassword localhost:5000
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
```
|
||||
```console
|
||||
$ skopeo login --authfile authdir/myauths.json docker.io
|
||||
Username: testuser
|
||||
Password:
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
```
|
||||
```console
|
||||
$ skopeo login --tls-verify=false -u test -p test localhost:5000
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
```
|
||||
```console
|
||||
$ skopeo login --cert-dir /etc/containers/certs.d/ -u foo -p bar localhost:5000
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
```
|
||||
```console
|
||||
$ skopeo login -u testuser --password-stdin < testpassword.txt docker.io
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
```
|
||||
```console
|
||||
$ echo $testpassword | skopeo login -u testuser --password-stdin docker.io
|
||||
Login Succeeded!
|
||||
```
|
||||
|
||||
@@ -35,17 +35,17 @@ Require HTTPS and verify certificates when talking to the container registry or
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
```
|
||||
```console
|
||||
$ skopeo logout docker.io
|
||||
Remove login credentials for docker.io
|
||||
```
|
||||
|
||||
```
|
||||
```console
|
||||
$ skopeo logout --authfile authdir/myauths.json docker.io
|
||||
Remove login credentials for docker.io
|
||||
```
|
||||
|
||||
```
|
||||
```console
|
||||
$ skopeo logout --all
|
||||
Remove login credentials for all registries
|
||||
```
|
||||
|
||||
@@ -18,7 +18,7 @@ Print usage statement
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
```sh
|
||||
```console
|
||||
$ skopeo manifest-digest manifest.json
|
||||
sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6
|
||||
```
|
||||
|
||||
@@ -31,7 +31,7 @@ The passphare to use when signing with the key ID from `--sign-by`. Only the fir
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
```sh
|
||||
```console
|
||||
$ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 --output busybox.signature
|
||||
$
|
||||
```
|
||||
|
||||
@@ -30,7 +30,7 @@ Print usage statement
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
```sh
|
||||
```console
|
||||
$ skopeo standalone-verify busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 busybox.signature
|
||||
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
|
||||
```
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
% skopeo-sync(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-sync - Synchronize images between container registries and local directories.
|
||||
skopeo\-sync - Synchronize images between registry repositories and local directories.
|
||||
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo sync** [*options*] --src _transport_ --dest _transport_ _source_ _destination_
|
||||
|
||||
## DESCRIPTION
|
||||
Synchronize images between container registries and local directories.
|
||||
Synchronize images between registry repoositories and local directories.
|
||||
The synchronization is achieved by copying all the images found at _source_ to _destination_.
|
||||
|
||||
Useful to synchronize a local container registry mirror, and to populate registries running inside air-gapped environments.
|
||||
@@ -66,7 +66,9 @@ Print usage statement.
|
||||
|
||||
**--scoped** Prefix images with the source image path, so that multiple images with the same name can be stored at _destination_.
|
||||
|
||||
**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved.
|
||||
**--append-suffix** _tag-suffix_ String to append to destination tags.
|
||||
|
||||
**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.
|
||||
|
||||
**--remove-signatures** Do not copy signatures, if any, from _source-image_. This is necessary when copying a signed image to a destination which does not support signatures.
|
||||
|
||||
@@ -74,6 +76,11 @@ Print usage statement.
|
||||
|
||||
Add a “simple signing” signature using that key ID for an image name corresponding to _destination-image_
|
||||
|
||||
**--sign-by-sigstore** _param-file_
|
||||
|
||||
Add a sigstore signature based on the options in the specified containers sigstore signing parameter file, _param-file_.
|
||||
See containers-sigstore-signing-params.yaml(5) for details about the file format.
|
||||
|
||||
**--sign-by-sigstore-private-key** _path_
|
||||
|
||||
Add a sigstore signature using a private key at _path_ for an image name corresponding to _destination-image_
|
||||
@@ -126,7 +133,7 @@ The password to access the destination registry.
|
||||
## EXAMPLES
|
||||
|
||||
### Synchronizing to a local directory
|
||||
```
|
||||
```console
|
||||
$ skopeo sync --src docker --dest dir registry.example.com/busybox /media/usb
|
||||
```
|
||||
Images are located at:
|
||||
@@ -144,7 +151,7 @@ Images are located at:
|
||||
/media/usb/busybox:1-glibc
|
||||
```
|
||||
Sync run
|
||||
```
|
||||
```console
|
||||
$ skopeo sync --src dir --dest docker /media/usb/busybox:1-glibc my-registry.local.lan/test/
|
||||
```
|
||||
Destination registry content:
|
||||
@@ -154,7 +161,7 @@ my-registry.local.lan/test/busybox 1-glibc
|
||||
```
|
||||
|
||||
### Synchronizing to a local directory, scoped
|
||||
```
|
||||
```console
|
||||
$ skopeo sync --src docker --dest dir --scoped registry.example.com/busybox /media/usb
|
||||
```
|
||||
Images are located at:
|
||||
@@ -167,8 +174,8 @@ Images are located at:
|
||||
```
|
||||
|
||||
### Synchronizing to a container registry
|
||||
```
|
||||
skopeo sync --src docker --dest docker registry.example.com/busybox my-registry.local.lan
|
||||
```console
|
||||
$ skopeo sync --src docker --dest docker registry.example.com/busybox my-registry.local.lan
|
||||
```
|
||||
Destination registry content:
|
||||
```
|
||||
@@ -177,8 +184,8 @@ registry.local.lan/busybox 1-glibc, 1-musl, 1-ubuntu, ..., latest
|
||||
```
|
||||
|
||||
### Synchronizing to a container registry keeping the repository
|
||||
```
|
||||
skopeo sync --src docker --dest docker registry.example.com/repo/busybox my-registry.local.lan/repo
|
||||
```console
|
||||
$ skopeo sync --src docker --dest docker registry.example.com/repo/busybox my-registry.local.lan/repo
|
||||
```
|
||||
Destination registry content:
|
||||
```
|
||||
@@ -186,6 +193,16 @@ REPO TAGS
|
||||
registry.local.lan/repo/busybox 1-glibc, 1-musl, 1-ubuntu, ..., latest
|
||||
```
|
||||
|
||||
### Synchronizing to a container registry with tag suffix
|
||||
```console
|
||||
$ skopeo sync --src docker --dest docker --append-suffix '-mirror' registry.example.com/busybox my-registry.local.lan
|
||||
```
|
||||
Destination registry content:
|
||||
```
|
||||
REPO TAGS
|
||||
registry.local.lan/busybox 1-glibc-mirror, 1-musl-mirror, 1-ubuntu-mirror, ..., latest-mirror
|
||||
```
|
||||
|
||||
### YAML file content (used _source_ for `**--src yaml**`)
|
||||
|
||||
```yaml
|
||||
@@ -210,8 +227,8 @@ quay.io:
|
||||
- latest
|
||||
```
|
||||
If the yaml filename is `sync.yml`, sync run:
|
||||
```
|
||||
skopeo sync --src yaml --dest docker sync.yml my-registry.local.lan/repo/
|
||||
```console
|
||||
$ skopeo sync --src yaml --dest docker sync.yml my-registry.local.lan/repo/
|
||||
```
|
||||
This will copy the following images:
|
||||
- Repository `registry.example.com/busybox`: all images, as no tags are specified.
|
||||
|
||||
@@ -47,7 +47,7 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
|
||||
**oci-archive:**_path_**:**_tag_
|
||||
An image _tag_ in a tar archive compliant with "Open Container Image Layout Specification" at _path_.
|
||||
|
||||
See [containers-transports(5)](https://github.com/containers/image/blob/master/docs/containers-transports.5.md) for details.
|
||||
See [containers-transports(5)](https://github.com/containers/image/blob/main/docs/containers-transports.5.md) for details.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
@@ -101,6 +101,7 @@ Print the version number
|
||||
| ----------------------------------------- | ------------------------------------------------------------------------------ |
|
||||
| [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
|
||||
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark the _image-name_ for later deletion by the registry's garbage collector. |
|
||||
| [skopeo-generate-sigstore-key(1)](skopeo-generate-sigstore-key.1.md) | Generate a sigstore public/private key pair. |
|
||||
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about _image-name_ in a registry. |
|
||||
| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List image names in a transport-specific collection of images.|
|
||||
| [skopeo-login(1)](skopeo-login.1.md) | Login to a container registry. |
|
||||
@@ -108,16 +109,16 @@ Print the version number
|
||||
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
|
||||
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
|
||||
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image signature. |
|
||||
| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between container registries and local directories. |
|
||||
| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between registry repositories and local directories. |
|
||||
|
||||
## FILES
|
||||
**/etc/containers/policy.json**
|
||||
Default trust policy file, if **--policy** is not specified.
|
||||
The policy format is documented in [containers-policy.json(5)](https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md) .
|
||||
The policy format is documented in [containers-policy.json(5)](https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md) .
|
||||
|
||||
**/etc/containers/registries.d**
|
||||
Default directory containing registry configuration, if **--registries.d** is not specified.
|
||||
The contents of this directory are documented in [containers-policy.json(5)](https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md).
|
||||
The contents of this directory are documented in [containers-policy.json(5)](https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md).
|
||||
|
||||
## SEE ALSO
|
||||
skopeo-login(1), docker-login(1), containers-auth.json(5), containers-storage.conf(5), containers-policy.json(5), containers-transports(5)
|
||||
|
||||
go.mod (143 lines changed)
@@ -3,105 +3,134 @@ module github.com/containers/skopeo
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/containers/common v0.48.0
|
||||
github.com/containers/image/v5 v5.21.2-0.20220712113758-29aec5f7bbbf
|
||||
github.com/containers/ocicrypt v1.1.5
|
||||
github.com/containers/storage v1.41.0
|
||||
github.com/docker/docker v20.10.17+incompatible
|
||||
github.com/containers/common v0.51.4
|
||||
github.com/containers/image/v5 v5.24.3
|
||||
github.com/containers/ocicrypt v1.1.10
|
||||
github.com/containers/storage v1.45.3
|
||||
github.com/docker/distribution v2.8.1+incompatible
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
|
||||
github.com/opencontainers/image-spec v1.1.0-rc2
|
||||
github.com/opencontainers/image-tools v1.0.0-rc3
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/spf13/cobra v1.5.0
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/stretchr/testify v1.8.1
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
|
||||
golang.org/x/term v0.17.0
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v1.1.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||
github.com/Microsoft/hcsshim v0.9.2 // indirect
|
||||
github.com/BurntSushi/toml v1.2.1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||
github.com/Microsoft/hcsshim v0.9.6 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/containerd/cgroups v1.0.3 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect
|
||||
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
|
||||
github.com/containerd/cgroups v1.0.4 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
|
||||
github.com/coreos/go-oidc/v3 v3.5.0 // indirect
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/distribution v2.8.1+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.6.4 // indirect
|
||||
github.com/docker/docker v20.10.23+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
|
||||
github.com/go-openapi/analysis v0.21.4 // indirect
|
||||
github.com/go-openapi/errors v0.20.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/loads v0.21.2 // indirect
|
||||
github.com/go-openapi/runtime v0.24.1 // indirect
|
||||
github.com/go-openapi/spec v0.20.7 // indirect
|
||||
github.com/go-openapi/strfmt v0.21.3 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-openapi/validate v0.22.0 // indirect
|
||||
github.com/go-playground/locales v0.14.0 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.0 // indirect
|
||||
github.com/go-playground/validator/v10 v10.11.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-containerregistry v0.10.0 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/go-containerregistry v0.13.0 // indirect
|
||||
github.com/google/go-intervals v0.0.2 // indirect
|
||||
github.com/google/trillian v1.5.0 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.2 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.1 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.15.7 // indirect
|
||||
github.com/klauspost/pgzip v1.2.5 // indirect
|
||||
github.com/kr/pretty v0.2.1 // indirect
|
||||
github.com/klauspost/compress v1.15.15 // indirect
|
||||
github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
|
||||
github.com/kr/pretty v0.3.0 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/leodido/go-urn v1.2.1 // indirect
|
||||
github.com/letsencrypt/boulder v0.0.0-20230130200452-c091e64aa391 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/mattn/go-shellwords v1.0.12 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
||||
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
|
||||
github.com/moby/sys/mountinfo v0.6.1 // indirect
|
||||
github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/sys/mountinfo v0.6.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/opencontainers/runc v1.1.2 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
|
||||
github.com/opencontainers/selinux v1.10.1 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/opencontainers/runc v1.1.4 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb // indirect
|
||||
github.com/opencontainers/selinux v1.10.2 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/proglottis/gpgme v0.1.3 // indirect
|
||||
github.com/prometheus/client_golang v1.12.1 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.3 // indirect
|
||||
github.com/rogpeppe/go-internal v1.8.0 // indirect
|
||||
github.com/russross/blackfriday v2.0.0+incompatible // indirect
|
||||
github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 // indirect
|
||||
github.com/segmentio/ksuid v1.0.4 // indirect
|
||||
github.com/sigstore/fulcio v1.0.0 // indirect
|
||||
github.com/sigstore/rekor v1.0.1 // indirect
|
||||
github.com/sigstore/sigstore v1.5.2 // indirect
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
|
||||
github.com/sylabs/sif/v2 v2.7.1 // indirect
|
||||
github.com/sylabs/sif/v2 v2.9.0 // indirect
|
||||
github.com/tchap/go-patricia v2.3.0+incompatible // indirect
|
||||
github.com/theupdateframework/go-tuf v0.3.0 // indirect
|
||||
github.com/theupdateframework/go-tuf v0.5.2-0.20221207161717-9cb61d6e65f5 // indirect
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
|
||||
github.com/ulikunitz/xz v0.5.10 // indirect
|
||||
github.com/ulikunitz/xz v0.5.11 // indirect
|
||||
github.com/vbatts/tar-split v0.11.2 // indirect
|
||||
github.com/vbauerster/mpb/v7 v7.4.2 // indirect
|
||||
github.com/vbauerster/mpb/v7 v7.5.3 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
go.etcd.io/bbolt v1.3.6 // indirect
|
||||
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 // indirect
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
|
||||
golang.org/x/sys v0.0.0-20220624220833-87e55d714810 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect
|
||||
google.golang.org/grpc v1.47.0 // indirect
|
||||
google.golang.org/protobuf v1.28.0 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.11.1 // indirect
|
||||
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
golang.org/x/crypto v0.19.0 // indirect
|
||||
golang.org/x/mod v0.8.0 // indirect
|
||||
golang.org/x/net v0.17.0 // indirect
|
||||
golang.org/x/oauth2 v0.7.0 // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
golang.org/x/sys v0.17.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/tools v0.6.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
|
||||
google.golang.org/grpc v1.56.3 // indirect
|
||||
google.golang.org/protobuf v1.30.0 // indirect
|
||||
gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
@@ -34,7 +34,7 @@ if [[ "$SKOPEO_CONTAINER_TESTS" == "0" ]] && [[ "$CI" != "true" ]]; then
|
||||
echo " the Makefile targets WITHOUT the '-local' suffix."
|
||||
echo "***************************************************************"
|
||||
) > /dev/stderr
|
||||
sleep 5s
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
echo
|
||||
|
||||
@@ -5,16 +5,36 @@ set -e
|
||||
# not all storage drivers are supported in a container
|
||||
# environment. Detect this and setup storage when
|
||||
# running in a container.
|
||||
if ((SKOPEO_CONTAINER_TESTS)) && [[ -r /etc/containers/storage.conf ]]; then
|
||||
sed -i \
|
||||
-e 's/^driver\s*=.*/driver = "vfs"/' \
|
||||
-e 's/^mountopt/#mountopt/' \
|
||||
/etc/containers/storage.conf
|
||||
elif ((SKOPEO_CONTAINER_TESTS)); then
|
||||
cat >> /etc/containers/storage.conf << EOF
|
||||
#
|
||||
# Paradoxically (FIXME: clean this up), SKOPEO_CONTAINER_TESTS is set
|
||||
# both inside a container and without a container (in a CI VM); it actually means
|
||||
# "it is safe to desctructively modify the system for tests".
|
||||
#
|
||||
# On a CI VM, we can just use Podman as it is already configured; the changes below,
|
||||
# to use VFS, are necessary only inside a container, because overlay-inside-overlay
|
||||
# does not work. So, make these changes conditional on both
|
||||
# SKOPEO_CONTAINER_TESTS (for acceptability to do destructive modification) and !CI
|
||||
# (for necessity to adjust for in-container operation)
|
||||
if ((SKOPEO_CONTAINER_TESTS)) && [[ "$CI" != true ]]; then
|
||||
if [[ -r /etc/containers/storage.conf ]]; then
|
||||
echo "MODIFYING existing storage.conf"
|
||||
sed -i \
|
||||
-e 's/^driver\s*=.*/driver = "vfs"/' \
|
||||
-e 's/^mountopt/#mountopt/' \
|
||||
/etc/containers/storage.conf
|
||||
else
|
||||
echo "CREATING NEW storage.conf"
|
||||
cat >> /etc/containers/storage.conf << EOF
|
||||
[storage]
|
||||
driver = "vfs"
|
||||
runroot = "/run/containers/storage"
|
||||
graphroot = "/var/lib/containers/storage"
|
||||
EOF
|
||||
fi
|
||||
# The logic of finding the relevant storage.conf file is convoluted
|
||||
# and in effect differs between Skopeo and Podman, at least in some versions;
|
||||
# explicitly point at the file we want to use to hopefully avoid that.
|
||||
export CONTAINERS_STORAGE_CONF=/etc/containers/storage.conf
|
||||
fi
|
||||
|
||||
# Build skopeo, install into /usr/bin
|
||||
|
||||
@@ -49,25 +49,25 @@ func (s *SkopeoSuite) TearDownSuite(c *check.C) {
|
||||
//func skopeoCmd()
|
||||
|
||||
func (s *SkopeoSuite) TestVersion(c *check.C) {
|
||||
wanted := fmt.Sprintf(".*%s version %s.*", skopeoBinary, version.Version)
|
||||
assertSkopeoSucceeds(c, wanted, "--version")
|
||||
assertSkopeoSucceeds(c, fmt.Sprintf(".*%s version %s.*", skopeoBinary, version.Version),
|
||||
"--version")
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCanAuthToPrivateRegistryV2WithoutDockerCfg(c *check.C) {
|
||||
wanted := ".*manifest unknown: manifest unknown.*"
|
||||
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", "--creds="+s.regV2WithAuth.username+":"+s.regV2WithAuth.password, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
assertSkopeoFails(c, ".*manifest unknown.*",
|
||||
"--tls-verify=false", "inspect", "--creds="+s.regV2WithAuth.username+":"+s.regV2WithAuth.password, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestNeedAuthToPrivateRegistryV2WithoutDockerCfg(c *check.C) {
|
||||
wanted := ".*unauthorized: authentication required.*"
|
||||
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
assertSkopeoFails(c, ".*authentication required.*",
|
||||
"--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCertDirInsteadOfCertPath(c *check.C) {
|
||||
wanted := ".*unknown flag: --cert-path.*"
|
||||
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-path=/")
|
||||
wanted = ".*unauthorized: authentication required.*"
|
||||
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-dir=/etc/docker/certs.d/")
|
||||
assertSkopeoFails(c, ".*unknown flag: --cert-path.*",
|
||||
"--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-path=/")
|
||||
assertSkopeoFails(c, ".*authentication required.*",
|
||||
"--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-dir=/etc/docker/certs.d/")
|
||||
}
|
||||
|
||||
// TODO(runcom): as soon as we can push to registries ensure you can inspect here
|
||||
@@ -75,10 +75,8 @@ func (s *SkopeoSuite) TestCertDirInsteadOfCertPath(c *check.C) {
|
||||
func (s *SkopeoSuite) TestNoNeedAuthToPrivateRegistryV2ImageNotFound(c *check.C) {
|
||||
out, err := exec.Command(skopeoBinary, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2.url)).CombinedOutput()
|
||||
c.Assert(err, check.NotNil, check.Commentf(string(out)))
|
||||
wanted := ".*manifest unknown.*"
|
||||
c.Assert(string(out), check.Matches, "(?s)"+wanted) // (?s) : '.' will also match newlines
|
||||
wanted = ".*unauthorized: authentication required.*"
|
||||
c.Assert(string(out), check.Not(check.Matches), "(?s)"+wanted) // (?s) : '.' will also match newlines
|
||||
c.Assert(string(out), check.Matches, "(?s).*manifest unknown.*") // (?s) : '.' will also match newlines
|
||||
c.Assert(string(out), check.Not(check.Matches), "(?s).*unauthorized: authentication required.*") // (?s) : '.' will also match newlines
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestInspectFailsWhenReferenceIsInvalid(c *check.C) {
|
||||
@@ -86,28 +84,28 @@ func (s *SkopeoSuite) TestInspectFailsWhenReferenceIsInvalid(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestLoginLogout(c *check.C) {
|
||||
wanted := "^Login Succeeded!\n$"
|
||||
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, "^Login Succeeded!\n$",
|
||||
"login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
|
||||
// test --get-login returns username
|
||||
wanted = fmt.Sprintf("^%s\n$", s.regV2WithAuth.username)
|
||||
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--get-login", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, fmt.Sprintf("^%s\n$", s.regV2WithAuth.username),
|
||||
"login", "--tls-verify=false", "--get-login", s.regV2WithAuth.url)
|
||||
// test logout
|
||||
wanted = fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, wanted, "logout", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url),
|
||||
"logout", s.regV2WithAuth.url)
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCopyWithLocalAuth(c *check.C) {
|
||||
wanted := "^Login Succeeded!\n$"
|
||||
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, "^Login Succeeded!\n$",
|
||||
"login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
|
||||
// copy to private registry using local authentication
|
||||
imageName := fmt.Sprintf("docker://%s/busybox:mine", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", testFQIN+":latest", imageName)
|
||||
// inspect from private registry
|
||||
assertSkopeoSucceeds(c, "", "inspect", "--tls-verify=false", imageName)
|
||||
// logout from the registry
|
||||
wanted = fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, wanted, "logout", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url),
|
||||
"logout", s.regV2WithAuth.url)
|
||||
// inspect from private registry should fail after logout
|
||||
wanted = ".*unauthorized: authentication required.*"
|
||||
assertSkopeoFails(c, wanted, "inspect", "--tls-verify=false", imageName)
|
||||
assertSkopeoFails(c, ".*authentication required.*",
|
||||
"inspect", "--tls-verify=false", imageName)
|
||||
}
|
||||
|
||||
@@ -31,7 +31,8 @@ const (
|
||||
v2DockerRegistryURL = "localhost:5555" // Update also policy.json
|
||||
v2s1DockerRegistryURL = "localhost:5556"
|
||||
knownWindowsOnlyImage = "docker://mcr.microsoft.com/windows/nanoserver:1909"
|
||||
knownListImage = "docker://registry.fedoraproject.org/fedora-minimal" // could have either ":latest" or "@sha256:..." appended
|
||||
knownListImageRepo = "docker://registry.fedoraproject.org/fedora-minimal"
|
||||
knownListImage = knownListImageRepo + ":38"
|
||||
)
|
||||
|
||||
type CopySuite struct {
|
||||
@@ -196,8 +197,8 @@ func (s *CopySuite) TestCopyWithManifestListDigest(c *check.C) {
|
||||
manifestDigest, err := manifest.Digest([]byte(m))
|
||||
c.Assert(err, check.IsNil)
|
||||
digest := manifestDigest.String()
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImage+"@"+digest, "dir:"+dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImageRepo+"@"+digest, "dir:"+dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", "dir:"+dir1, "oci:"+oci1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "dir:"+dir2, "oci:"+oci2)
|
||||
out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
|
||||
@@ -224,9 +225,9 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigest(c *check.C) {
|
||||
manifestDigest, err := manifest.Digest([]byte(m))
|
||||
c.Assert(err, check.IsNil)
|
||||
digest := manifestDigest.String()
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test@"+digest, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "dir:"+dir2)
|
||||
runDecompressDirs(c, "", dir1, dir2)
|
||||
assertDirImagesAreEqual(c, dir1, dir2)
|
||||
}
|
||||
@@ -240,9 +241,9 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArches(c *check
|
||||
manifestDigest, err := manifest.Digest([]byte(m))
|
||||
c.Assert(err, check.IsNil)
|
||||
digest := manifestDigest.String()
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test@"+digest, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "dir:"+dir2)
|
||||
runDecompressDirs(c, "", dir1, dir2)
|
||||
assertDirImagesAreEqual(c, dir1, dir2)
|
||||
}
|
||||
@@ -256,8 +257,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseLi
|
||||
digest := manifestDigest.String()
|
||||
_, err = manifest.ListFromBlob([]byte(m), manifest.GuessMIMEType([]byte(m)))
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
@@ -280,8 +281,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUses
|
||||
c.Assert(err, check.IsNil)
|
||||
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+arm64Instance.String(), "containers-storage:"+storage+"test@"+arm64Instance.String())
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+arm64Instance.String(), "containers-storage:"+storage+"test@"+arm64Instance.String())
|
||||
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
var image1 imgspecv1.Image
|
||||
err = json.Unmarshal([]byte(i1), &image1)
|
||||
@@ -314,8 +315,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUse
|
||||
c.Assert(err, check.IsNil)
|
||||
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
var image1 imgspecv1.Image
|
||||
err = json.Unmarshal([]byte(i1), &image1)
|
||||
@@ -348,9 +349,9 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUses
|
||||
c.Assert(err, check.IsNil)
|
||||
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
var image1 imgspecv1.Image
|
||||
@@ -383,7 +384,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDig
|
||||
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage, "containers-storage:"+storage+"test:latest")
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test:latest")
|
||||
var image1 imgspecv1.Image
|
||||
@@ -447,7 +448,7 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
// "pull": docker: → dir:
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause", "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://registry.k8s.io/pause", "dir:"+dir1)
|
||||
// "push": dir: → docker(v2s2):
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, ourRegistry+"pause:unsigned")
|
||||
// The result of pushing and pulling is an unmodified image.
|
||||
@@ -461,14 +462,14 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
|
||||
ociDest := "pause-latest-image"
|
||||
ociImgName := "pause"
|
||||
defer os.RemoveAll(ociDest)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest+":"+ociImgName)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://registry.k8s.io/pause:latest", "oci:"+ociDest+":"+ociImgName)
|
||||
_, err := os.Stat(ociDest)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// docker v2s2 -> OCI image layout without image name
|
||||
ociDest = "pause-latest-noimage"
|
||||
defer os.RemoveAll(ociDest)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://registry.k8s.io/pause:latest", "oci:"+ociDest)
|
||||
_, err = os.Stat(ociDest)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
@@ -1036,7 +1037,8 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
|
||||
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
|
||||
// To be extra clear about the semantics, verify that the signedPrefix (primary) location never exists
|
||||
// and only the remapped prefix (mirror) is accessed.
|
||||
assertSkopeoFails(c, ".*initializing source docker://localhost:5006/myns/mirroring-primary:remapped:.*manifest unknown: manifest unknown.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:remapped", dirDest)
|
||||
assertSkopeoFails(c, ".*initializing source docker://localhost:5006/myns/mirroring-primary:remapped:.*manifest unknown.*",
|
||||
"--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:remapped", dirDest)
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/homedir"
|
||||
"github.com/containers/storage/pkg/homedir"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
|
||||
@@ -15,11 +15,15 @@ TestRunShell is not really a test; it is a convenient way to use the registry se
in openshift.go and CopySuite to get an interactive environment for experimentation.

To use it, run:

sudo make shell

to start a container, then within the container:

SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'

An example of what can be done within the container:

cd ..; make bin/skopeo PREFIX=/usr install
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://quay.io/libpod/busybox:latest atomic:localhost:5000/myns/personal:personal
oc get istag personal:personal -o json

@@ -20,6 +20,9 @@ import (
|
||||
// This image is known to be x86_64 only right now
|
||||
const knownNotManifestListedImage_x8664 = "docker://quay.io/coreos/11bot"
|
||||
|
||||
// knownNotExtantImage would be very surprising if it did exist
|
||||
const knownNotExtantImage = "docker://quay.io/centos/centos:opensusewindowsubuntu"
|
||||
|
||||
const expectedProxySemverMajor = "0.2"
|
||||
|
||||
// request is copied from proxy.go
|
||||
@@ -240,6 +243,29 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
|
||||
return fmt.Errorf("OpenImage return value is %T", v)
|
||||
}
|
||||
imgid := uint32(imgidv)
|
||||
if imgid == 0 {
|
||||
return fmt.Errorf("got zero from expected image")
|
||||
}
|
||||
|
||||
// Also verify the optional path
|
||||
v, err = p.callNoFd("OpenImageOptional", []interface{}{knownNotManifestListedImage_x8664})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
imgidv, ok = v.(float64)
|
||||
if !ok {
|
||||
return fmt.Errorf("OpenImageOptional return value is %T", v)
|
||||
}
|
||||
imgid2 := uint32(imgidv)
|
||||
if imgid2 == 0 {
|
||||
return fmt.Errorf("got zero from expected image")
|
||||
}
|
||||
|
||||
_, err = p.callNoFd("CloseImage", []interface{}{imgid2})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, manifestBytes, err := p.callReadAllBytes("GetManifest", []interface{}{imgid})
|
||||
if err != nil {
|
||||
@@ -292,6 +318,23 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func runTestOpenImageOptionalNotFound(p *proxy, img string) error {
|
||||
v, err := p.callNoFd("OpenImageOptional", []interface{}{img})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
imgidv, ok := v.(float64)
|
||||
if !ok {
|
||||
return fmt.Errorf("OpenImageOptional return value is %T", v)
|
||||
}
|
||||
imgid := uint32(imgidv)
|
||||
if imgid != 0 {
|
||||
return fmt.Errorf("Unexpected optional image id %v", imgid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ProxySuite) TestProxy(c *check.C) {
p, err := newProxy()
c.Assert(err, check.IsNil)
@@ -307,4 +350,10 @@ func (s *ProxySuite) TestProxy(c *check.C) {
err = fmt.Errorf("Testing image %s: %v", knownListImage, err)
}
c.Assert(err, check.IsNil)

err = runTestOpenImageOptionalNotFound(p, knownNotExtantImage)
if err != nil {
err = fmt.Errorf("Testing optional image %s: %v", knownNotExtantImage, err)
}
c.Assert(err, check.IsNil)
}
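As a reading aid for the OpenImageOptional flow exercised above, here is a small hedged sketch of a wrapper around the test suite's callNoFd helper. The proxy type, callNoFd, and the fmt import are the ones already present in this test file; the wrapper itself is hypothetical and not part of the diff. It captures the two conventions the tests rely on: JSON numbers arrive as float64, and an image id of 0 means the image was not found.

```go
// openImageOptional is a hypothetical helper, assuming the proxy type and
// callNoFd method defined in this test file.
func openImageOptional(p *proxy, img string) (id uint32, found bool, err error) {
	v, err := p.callNoFd("OpenImageOptional", []interface{}{img})
	if err != nil {
		return 0, false, err
	}
	f, ok := v.(float64) // JSON numbers are decoded as float64
	if !ok {
		return 0, false, fmt.Errorf("OpenImageOptional return value is %T", v)
	}
	id = uint32(f)
	return id, id != 0, nil // an id of 0 signals "image not present"
}
```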
|
||||
|
||||
@@ -116,6 +116,7 @@ func (t *testRegistryV2) Ping() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
|
||||
return fmt.Errorf("registry ping replied with an unexpected status code %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
@@ -22,15 +22,15 @@ const (
|
||||
// A repository with a path with multiple components in it which
|
||||
// contains multiple tags, preferably with some tags pointing to
|
||||
// manifest lists, and with some tags that don't.
|
||||
pullableRepo = "k8s.gcr.io/coredns/coredns"
|
||||
pullableRepo = "registry.k8s.io/coredns/coredns"
|
||||
// A tagged image in the repository that we can inspect and copy.
|
||||
pullableTaggedImage = "k8s.gcr.io/coredns/coredns:v1.6.6"
|
||||
pullableTaggedImage = "registry.k8s.io/coredns/coredns:v1.6.6"
|
||||
// A tagged manifest list in the repository that we can inspect and copy.
|
||||
pullableTaggedManifestList = "k8s.gcr.io/coredns/coredns:v1.8.0"
|
||||
pullableTaggedManifestList = "registry.k8s.io/coredns/coredns:v1.8.0"
|
||||
// A repository containing multiple tags, some of which are for
|
||||
// manifest lists, and which includes a "latest" tag. We specify the
|
||||
// name here without a tag.
|
||||
pullableRepoWithLatestTag = "k8s.gcr.io/pause"
|
||||
pullableRepoWithLatestTag = "registry.k8s.io/pause"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -305,7 +305,7 @@ func (s *SyncSuite) TestYamlRegex2Dir(c *check.C) {
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
|
||||
yamlConfig := `
|
||||
k8s.gcr.io:
|
||||
registry.k8s.io:
|
||||
images-by-tag-regex:
|
||||
pause: ^[12]\.0$ # regex string test
|
||||
`
|
||||
@@ -325,7 +325,7 @@ func (s *SyncSuite) TestYamlDigest2Dir(c *check.C) {
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
|
||||
yamlConfig := `
|
||||
k8s.gcr.io:
|
||||
registry.k8s.io:
|
||||
images:
|
||||
pause:
|
||||
- sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
|
||||
@@ -342,7 +342,7 @@ func (s *SyncSuite) TestYaml2Dir(c *check.C) {
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
|
||||
yamlConfig := `
|
||||
k8s.gcr.io:
|
||||
registry.k8s.io:
|
||||
images:
|
||||
coredns/coredns:
|
||||
- v1.8.0
|
||||
@@ -534,7 +534,7 @@ func (s *SyncSuite) TestFailsNoSourceImages(c *check.C) {
|
||||
assertSkopeoFails(c, ".*No images to sync found in .*",
|
||||
"sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", tmpDir, v2DockerRegistryURL)
|
||||
|
||||
assertSkopeoFails(c, ".*No images to sync found in .*",
|
||||
assertSkopeoFails(c, ".*Error determining repository tags for repo docker.io/library/hopefully_no_images_will_ever_be_called_like_this: fetching tags list: requested access to the resource is denied.*",
|
||||
"sync", "--scoped", "--dest-tls-verify=false", "--src", "docker", "--dest", "docker", "hopefully_no_images_will_ever_be_called_like_this", v2DockerRegistryURL)
|
||||
}
|
||||
|
||||
@@ -544,11 +544,11 @@ func (s *SyncSuite) TestFailsWithDockerSourceNoRegistry(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
//untagged
|
||||
assertSkopeoFails(c, ".*invalid status code from registry 404.*",
|
||||
assertSkopeoFails(c, ".*StatusCode: 404.*",
|
||||
"sync", "--scoped", "--src", "docker", "--dest", "dir", regURL, tmpDir)
|
||||
|
||||
//tagged
|
||||
assertSkopeoFails(c, ".*invalid status code from registry 404.*",
|
||||
assertSkopeoFails(c, ".*StatusCode: 404.*",
|
||||
"sync", "--scoped", "--src", "docker", "--dest", "dir", regURL+":thetag", tmpDir)
|
||||
}
|
||||
|
||||
@@ -557,11 +557,11 @@ func (s *SyncSuite) TestFailsWithDockerSourceUnauthorized(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
//untagged
|
||||
assertSkopeoFails(c, ".*Registry disallows tag list retrieval.*",
|
||||
assertSkopeoFails(c, ".*requested access to the resource is denied.*",
|
||||
"sync", "--scoped", "--src", "docker", "--dest", "dir", repo, tmpDir)
|
||||
|
||||
//tagged
|
||||
assertSkopeoFails(c, ".*unauthorized: authentication required.*",
|
||||
assertSkopeoFails(c, ".*requested access to the resource is denied.*",
|
||||
"sync", "--scoped", "--src", "docker", "--dest", "dir", repo+":thetag", tmpDir)
|
||||
}
|
||||
|
||||
@@ -570,7 +570,7 @@ func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) {
|
||||
tmpDir := c.MkDir()
|
||||
|
||||
//untagged
|
||||
assertSkopeoFails(c, ".*invalid status code from registry 404.*",
|
||||
assertSkopeoFails(c, ".*repository name not known to registry.*",
|
||||
"sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", repo, tmpDir)
|
||||
|
||||
//tagged
|
||||
|
||||
@@ -19,7 +19,7 @@ const decompressDirsBinary = "./decompress-dirs.sh"
|
||||
|
||||
const testFQIN = "docker://quay.io/libpod/busybox" // tag left off on purpose, some tests need to add a special one
|
||||
const testFQIN64 = "docker://quay.io/libpod/busybox:amd64"
|
||||
const testFQINMultiLayer = "docker://quay.io/libpod/alpine_nginx:master" // multi-layer
|
||||
const testFQINMultiLayer = "docker://quay.io/libpod/alpine_nginx:latest" // multi-layer
|
||||
|
||||
// consumeAndLogOutputStream takes (f, err) from an exec.*Pipe(), and causes all output to it to be logged to c.
|
||||
func consumeAndLogOutputStream(c *check.C, id string, f io.ReadCloser, err error) {
|
||||
|
||||
@@ -16,4 +16,29 @@ function setup() {
|
||||
expect_output --substring "skopeo version [0-9.]+"
|
||||
}
|
||||
|
||||
@test "skopeo release isn't a development version" {
|
||||
[[ "${RELEASE_TESTING:-false}" == "true" ]] || \
|
||||
skip "Release testing may be enabled by setting \$RELEASE_TESTING = 'true'."
|
||||
|
||||
run_skopeo --version
|
||||
|
||||
# expect_output() doesn't support negative matching
|
||||
if [[ "$output" =~ "dev" ]]; then
|
||||
# This is a multi-line message, which may in turn contain multi-line
|
||||
# output, so let's format it ourselves, readably
|
||||
local -a output_split
|
||||
readarray -t output_split <<<"$output"
|
||||
printf "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n" >&2
|
||||
printf "#| FAIL: $BATS_TEST_NAME\n" >&2
|
||||
printf "#| unexpected: 'dev'\n" >&2
|
||||
printf "#| actual: '%s'\n" "${output_split[0]}" >&2
|
||||
local line
|
||||
for line in "${output_split[@]:1}"; do
|
||||
printf "#| > '%s'\n" "$line" >&2
|
||||
done
|
||||
printf "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n" >&2
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
# vim: filetype=sh
|
||||
|
||||
@@ -95,10 +95,11 @@ END_EXPECT
|
||||
# is created by the make-noarch-manifest script in this directory.
|
||||
img=docker://quay.io/libpod/notmyarch:20210121
|
||||
|
||||
# Get our host arch (what we're running on). This assumes that skopeo
|
||||
# arch matches podman; it also assumes running podman >= April 2020
|
||||
# (prior to that, the format keys were lower-case).
|
||||
arch=$(podman info --format '{{.Host.Arch}}')
|
||||
# Get our host golang arch (what we're running on, according to golang).
|
||||
# This assumes that skopeo arch matches host arch (which it always should).
|
||||
# Buildah is used here because it depends less on the exact system config
|
||||
# than podman - and all we're really after is the golang-flavored arch name.
|
||||
arch=$(go env GOARCH)
|
||||
|
||||
# By default, 'inspect' tries to match our host os+arch. This should fail.
|
||||
run_skopeo 1 inspect $img
|
||||
|
||||
@@ -50,7 +50,7 @@ function setup() {
|
||||
|
||||
local dir=$TESTDIR/dir
|
||||
|
||||
run_skopeo copy --dest-compress --dest-compress-format=zstd $remote_image oci:$dir:latest
|
||||
run_skopeo copy --dest-compress-format=zstd $remote_image oci:$dir:latest
|
||||
|
||||
# zstd magic number
|
||||
local magic=$(printf "\x28\xb5\x2f\xfd")
|
||||
|
||||
@@ -8,38 +8,40 @@ load helpers
|
||||
function setup() {
|
||||
standard_setup
|
||||
|
||||
# Remove old/stale cred file
|
||||
_cred_dir=$TESTDIR/credentials
|
||||
export XDG_RUNTIME_DIR=$_cred_dir
|
||||
mkdir -p $_cred_dir/containers
|
||||
rm -f $_cred_dir/containers/auth.json
|
||||
|
||||
# Start authenticated registry with random password
|
||||
testuser=testuser
|
||||
testpassword=$(random_string 15)
|
||||
|
||||
start_registry --testuser=$testuser --testpassword=$testpassword --enable-delete=true reg
|
||||
|
||||
_cred_dir=$TESTDIR/credentials
|
||||
# It is important to change XDG_RUNTIME_DIR only after we start the registry, otherwise it affects the path of $XDG_RUNTIME_DIR/netns maintained by Podman,
# making it impossible to clean up after ourselves.
|
||||
export XDG_RUNTIME_DIR=$_cred_dir
|
||||
mkdir -p $_cred_dir/containers
|
||||
# Remove old/stale cred file
|
||||
rm -f $_cred_dir/containers/auth.json
|
||||
}
|
||||
|
||||
@test "auth: credentials on command line" {
|
||||
# No creds
|
||||
run_skopeo 1 inspect --tls-verify=false docker://localhost:5000/nonesuch
|
||||
expect_output --substring "unauthorized: authentication required"
|
||||
expect_output --substring "authentication required"
|
||||
|
||||
# Wrong user
|
||||
run_skopeo 1 inspect --tls-verify=false --creds=baduser:badpassword \
|
||||
docker://localhost:5000/nonesuch
|
||||
expect_output --substring "unauthorized: authentication required"
|
||||
expect_output --substring "authentication required"
|
||||
|
||||
# Wrong password
|
||||
run_skopeo 1 inspect --tls-verify=false --creds=$testuser:badpassword \
|
||||
docker://localhost:5000/nonesuch
|
||||
expect_output --substring "unauthorized: authentication required"
|
||||
expect_output --substring "authentication required"
|
||||
|
||||
# Correct creds, but no such image
|
||||
run_skopeo 1 inspect --tls-verify=false --creds=$testuser:$testpassword \
|
||||
docker://localhost:5000/nonesuch
|
||||
expect_output --substring "manifest unknown: manifest unknown"
|
||||
expect_output --substring "manifest unknown"
|
||||
|
||||
# These should pass
|
||||
run_skopeo copy --dest-tls-verify=false --dcreds=$testuser:$testpassword \
|
||||
@@ -64,7 +66,7 @@ function setup() {
|
||||
podman logout localhost:5000
|
||||
|
||||
run_skopeo 1 inspect --tls-verify=false docker://localhost:5000/busybox:mine
|
||||
expect_output --substring "unauthorized: authentication required"
|
||||
expect_output --substring "authentication required"
|
||||
}
|
||||
|
||||
@test "auth: copy with --src-creds and --dest-creds" {
|
||||
@@ -94,7 +96,7 @@ function setup() {
|
||||
|
||||
# inspect without authfile: should fail
|
||||
run_skopeo 1 inspect --tls-verify=false docker://localhost:5000/busybox:mine
|
||||
expect_output --substring "unauthorized: authentication required"
|
||||
expect_output --substring "authentication required"
|
||||
|
||||
# inspect with authfile: should work
|
||||
run_skopeo inspect --tls-verify=false --authfile $TESTDIR/test.auth docker://localhost:5000/busybox:mine
|
||||
|
||||
@@ -10,7 +10,7 @@ SKOPEO_BINARY=${SKOPEO_BINARY:-${TEST_SOURCE_DIR}/../bin/skopeo}
|
||||
SKOPEO_TIMEOUT=${SKOPEO_TIMEOUT:-300}
|
||||
|
||||
# Default image to run as a local registry
|
||||
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-quay.io/libpod/registry:2}
|
||||
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-quay.io/libpod/registry:2.8.2}
|
||||
|
||||
###############################################################################
|
||||
# BEGIN setup/teardown
|
||||
|
||||
2 vendor/github.com/BurntSushi/toml/.gitignore generated vendored
@@ -1,2 +1,2 @@
|
||||
toml.test
|
||||
/toml.test
|
||||
/toml-test
|
||||
|
||||
1 vendor/github.com/BurntSushi/toml/COMPATIBLE generated vendored
@@ -1 +0,0 @@
|
||||
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
|
||||
185 vendor/github.com/BurntSushi/toml/README.md generated vendored
@@ -1,6 +1,5 @@
|
||||
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
|
||||
reflection interface similar to Go's standard library `json` and `xml`
|
||||
packages.
|
||||
reflection interface similar to Go's standard library `json` and `xml` packages.
|
||||
|
||||
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
|
||||
|
||||
@@ -10,7 +9,7 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a
|
||||
changelog; this information is also in the git tag annotations (e.g. `git show
|
||||
v0.4.0`).
|
||||
|
||||
This library requires Go 1.13 or newer; install it with:
|
||||
This library requires Go 1.13 or newer; add it to your go.mod with:
|
||||
|
||||
% go get github.com/BurntSushi/toml@latest
|
||||
|
||||
@@ -19,16 +18,7 @@ It also comes with a TOML validator CLI tool:
|
||||
% go install github.com/BurntSushi/toml/cmd/tomlv@latest
|
||||
% tomlv some-toml-file.toml
|
||||
|
||||
### Testing
|
||||
This package passes all tests in [toml-test] for both the decoder and the
|
||||
encoder.
|
||||
|
||||
[toml-test]: https://github.com/BurntSushi/toml-test
|
||||
|
||||
### Examples
|
||||
This package works similar to how the Go standard library handles XML and JSON.
|
||||
Namely, data is loaded into Go values via reflection.
|
||||
|
||||
For the simplest example, consider some TOML file as just a list of keys and
|
||||
values:
|
||||
|
||||
@@ -40,7 +30,7 @@ Perfection = [ 6, 28, 496, 8128 ]
|
||||
DOB = 1987-07-05T05:45:00Z
|
||||
```
|
||||
|
||||
Which could be defined in Go as:
|
||||
Which can be decoded with:
|
||||
|
||||
```go
|
||||
type Config struct {
|
||||
@@ -48,20 +38,15 @@ type Config struct {
|
||||
Cats []string
|
||||
Pi float64
|
||||
Perfection []int
|
||||
DOB time.Time // requires `import time`
|
||||
DOB time.Time
|
||||
}
|
||||
```
|
||||
|
||||
And then decoded with:
|
||||
|
||||
```go
|
||||
var conf Config
|
||||
_, err := toml.Decode(tomlData, &conf)
|
||||
// handle error
|
||||
```
|
||||
|
||||
You can also use struct tags if your struct field name doesn't map to a TOML
|
||||
key value directly:
|
||||
You can also use struct tags if your struct field name doesn't map to a TOML key
|
||||
value directly:
|
||||
|
||||
```toml
|
||||
some_key_NAME = "wat"
|
||||
@@ -73,139 +58,63 @@ type TOML struct {
|
||||
}
|
||||
```
|
||||
|
||||
Beware that like other most other decoders **only exported fields** are
|
||||
considered when encoding and decoding; private fields are silently ignored.
|
||||
Beware that like other decoders **only exported fields** are considered when
|
||||
encoding and decoding; private fields are silently ignored.
|
||||
|
||||
### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
|
||||
Here's an example that automatically parses duration strings into
|
||||
`time.Duration` values:
|
||||
Here's an example that automatically parses values in a `mail.Address`:
|
||||
|
||||
```toml
|
||||
[[song]]
|
||||
name = "Thunder Road"
|
||||
duration = "4m49s"
|
||||
|
||||
[[song]]
|
||||
name = "Stairway to Heaven"
|
||||
duration = "8m03s"
|
||||
contacts = [
|
||||
"Donald Duck <donald@duckburg.com>",
|
||||
"Scrooge McDuck <scrooge@duckburg.com>",
|
||||
]
|
||||
```
|
||||
|
||||
Which can be decoded with:
|
||||
Can be decoded with:
|
||||
|
||||
```go
|
||||
type song struct {
|
||||
Name string
|
||||
Duration duration
|
||||
}
|
||||
type songs struct {
|
||||
Song []song
|
||||
}
|
||||
var favorites songs
|
||||
if _, err := toml.Decode(blob, &favorites); err != nil {
|
||||
log.Fatal(err)
|
||||
// Create address type which satisfies the encoding.TextUnmarshaler interface.
|
||||
type address struct {
|
||||
*mail.Address
|
||||
}
|
||||
|
||||
for _, s := range favorites.Song {
|
||||
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
|
||||
}
|
||||
```
|
||||
|
||||
And you'll also need a `duration` type that satisfies the
|
||||
`encoding.TextUnmarshaler` interface:
|
||||
|
||||
```go
|
||||
type duration struct {
|
||||
time.Duration
|
||||
}
|
||||
|
||||
func (d *duration) UnmarshalText(text []byte) error {
|
||||
func (a *address) UnmarshalText(text []byte) error {
|
||||
var err error
|
||||
d.Duration, err = time.ParseDuration(string(text))
|
||||
a.Address, err = mail.ParseAddress(string(text))
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode it.
|
||||
func decode() {
|
||||
blob := `
|
||||
contacts = [
|
||||
"Donald Duck <donald@duckburg.com>",
|
||||
"Scrooge McDuck <scrooge@duckburg.com>",
|
||||
]
|
||||
`
|
||||
|
||||
var contacts struct {
|
||||
Contacts []address
|
||||
}
|
||||
|
||||
_, err := toml.Decode(blob, &contacts)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, c := range contacts.Contacts {
|
||||
fmt.Printf("%#v\n", c.Address)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
|
||||
// &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
|
||||
}
|
||||
```
|
||||
|
||||
To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
|
||||
a similar way.
|
||||
|
||||
### More complex usage
|
||||
Here's an example of how to load the example from the official spec page:
|
||||
|
||||
```toml
|
||||
# This is a TOML document. Boom.
|
||||
|
||||
title = "TOML Example"
|
||||
|
||||
[owner]
|
||||
name = "Tom Preston-Werner"
|
||||
organization = "GitHub"
|
||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
||||
|
||||
[database]
|
||||
server = "192.168.1.1"
|
||||
ports = [ 8001, 8001, 8002 ]
|
||||
connection_max = 5000
|
||||
enabled = true
|
||||
|
||||
[servers]
|
||||
|
||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
||||
[servers.alpha]
|
||||
ip = "10.0.0.1"
|
||||
dc = "eqdc10"
|
||||
|
||||
[servers.beta]
|
||||
ip = "10.0.0.2"
|
||||
dc = "eqdc10"
|
||||
|
||||
[clients]
|
||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
||||
|
||||
# Line breaks are OK when inside arrays
|
||||
hosts = [
|
||||
"alpha",
|
||||
"omega"
|
||||
]
|
||||
```
|
||||
|
||||
And the corresponding Go types are:
|
||||
|
||||
```go
|
||||
type tomlConfig struct {
|
||||
Title string
|
||||
Owner ownerInfo
|
||||
DB database `toml:"database"`
|
||||
Servers map[string]server
|
||||
Clients clients
|
||||
}
|
||||
|
||||
type ownerInfo struct {
|
||||
Name string
|
||||
Org string `toml:"organization"`
|
||||
Bio string
|
||||
DOB time.Time
|
||||
}
|
||||
|
||||
type database struct {
|
||||
Server string
|
||||
Ports []int
|
||||
ConnMax int `toml:"connection_max"`
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
type server struct {
|
||||
IP string
|
||||
DC string
|
||||
}
|
||||
|
||||
type clients struct {
|
||||
Data [][]interface{}
|
||||
Hosts []string
|
||||
}
|
||||
```
|
||||
|
||||
Note that a case insensitive match will be tried if an exact match can't be
|
||||
found.
|
||||
|
||||
A working example of the above can be found in `_example/example.{go,toml}`.
|
||||
See the [`_example/`](/_example) directory for a more complex example.
|
||||
|
||||
265 vendor/github.com/BurntSushi/toml/decode.go generated vendored
@@ -3,13 +3,16 @@ package toml
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
||||
@@ -18,7 +21,9 @@ type Unmarshaler interface {
|
||||
UnmarshalTOML(interface{}) error
|
||||
}
|
||||
|
||||
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
|
||||
// Unmarshal decodes the contents of data in TOML format into a pointer v.
|
||||
//
|
||||
// See [Decoder] for a description of the decoding process.
|
||||
func Unmarshal(data []byte, v interface{}) error {
|
||||
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
|
||||
return err
|
||||
@@ -26,13 +31,12 @@ func Unmarshal(data []byte, v interface{}) error {
|
||||
|
||||
// Decode the TOML data in to the pointer v.
|
||||
//
|
||||
// See the documentation on Decoder for a description of the decoding process.
|
||||
// See [Decoder] for a description of the decoding process.
|
||||
func Decode(data string, v interface{}) (MetaData, error) {
|
||||
return NewDecoder(strings.NewReader(data)).Decode(v)
|
||||
}
|
||||
|
||||
// DecodeFile is just like Decode, except it will automatically read the
|
||||
// contents of the file at path and decode it for you.
|
||||
// DecodeFile reads the contents of a file and decodes it with [Decode].
|
||||
func DecodeFile(path string, v interface{}) (MetaData, error) {
|
||||
fp, err := os.Open(path)
|
||||
if err != nil {
|
||||
@@ -45,7 +49,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
|
||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
||||
//
|
||||
// This type can be used for any value, which will cause decoding to be delayed.
|
||||
// You can use the PrimitiveDecode() function to "manually" decode these values.
|
||||
// You can use [PrimitiveDecode] to "manually" decode these values.
|
||||
//
|
||||
// NOTE: The underlying representation of a `Primitive` value is subject to
|
||||
// change. Do not rely on it.
|
||||
@@ -67,13 +71,16 @@ const (

// Decoder decodes TOML data.
//
// TOML tables correspond to Go structs or maps (dealer's choice – they can be
// used interchangeably).
// TOML tables correspond to Go structs or maps; they can be used
// interchangeably, but structs offer better type safety.
//
// TOML table arrays correspond to either a slice of structs or a slice of maps.
//
// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
// in the local timezone.
// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the
// local timezone.
//
// [time.Duration] types are treated as nanoseconds if the TOML value is an
// integer, or they're parsed with time.ParseDuration() if they're strings.
//
// All other TOML types (float, string, int, bool and array) correspond to the
// obvious Go types.
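The duration behaviour described in the doc comment above can be shown with a short, self-contained example. This is a sketch written against the newer toml version being vendored here, not code taken from the diff.

```go
package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

type config struct {
	// An integer TOML value is taken as nanoseconds; a string such as "1.5s"
	// goes through time.ParseDuration, as the doc comment above states.
	Timeout time.Duration
}

func main() {
	var c config
	if _, err := toml.Decode(`Timeout = "1.5s"`, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Timeout) // 1.5s
}
```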
@@ -82,9 +89,9 @@ const (
|
||||
// interface, in which case any primitive TOML value (floats, strings, integers,
|
||||
// booleans, datetimes) will be converted to a []byte and given to the value's
|
||||
// UnmarshalText method. See the Unmarshaler example for a demonstration with
|
||||
// time duration strings.
|
||||
// email addresses.
|
||||
//
|
||||
// Key mapping
|
||||
// ### Key mapping
|
||||
//
|
||||
// TOML keys can map to either keys in a Go map or field names in a Go struct.
|
||||
// The special `toml` struct tag can be used to map TOML keys to struct fields
|
||||
@@ -111,6 +118,7 @@ func NewDecoder(r io.Reader) *Decoder {
|
||||
var (
|
||||
unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
|
||||
primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem()
|
||||
)
|
||||
|
||||
// Decode TOML data in to the pointer `v`.
|
||||
@@ -122,10 +130,10 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
||||
s = "%v"
|
||||
}
|
||||
|
||||
return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v))
|
||||
return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v))
|
||||
}
|
||||
if rv.IsNil() {
|
||||
return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v))
|
||||
return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
|
||||
}
|
||||
|
||||
// Check if this is a supported type: struct, map, interface{}, or something
|
||||
@@ -135,7 +143,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
||||
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
|
||||
!(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) &&
|
||||
!rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) {
|
||||
return MetaData{}, e("cannot decode to type %s", rt)
|
||||
return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt)
|
||||
}
|
||||
|
||||
// TODO: parser should read from io.Reader? Or at the very least, make it
|
||||
@@ -152,25 +160,25 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
||||
|
||||
md := MetaData{
|
||||
mapping: p.mapping,
|
||||
types: p.types,
|
||||
keyInfo: p.keyInfo,
|
||||
keys: p.ordered,
|
||||
decoded: make(map[string]struct{}, len(p.ordered)),
|
||||
context: nil,
|
||||
data: data,
|
||||
}
|
||||
return md, md.unify(p.mapping, rv)
|
||||
}
|
||||
|
||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
|
||||
// decodes a TOML value that has already been parsed. Valid primitive values
|
||||
// can *only* be obtained from values filled by the decoder functions,
|
||||
// including this method. (i.e., `v` may contain more `Primitive`
|
||||
// values.)
|
||||
// PrimitiveDecode is just like the other Decode* functions, except it decodes a
|
||||
// TOML value that has already been parsed. Valid primitive values can *only* be
|
||||
// obtained from values filled by the decoder functions, including this method.
|
||||
// (i.e., v may contain more [Primitive] values.)
|
||||
//
|
||||
// Meta data for primitive values is included in the meta data returned by
|
||||
// the `Decode*` functions with one exception: keys returned by the Undecoded
|
||||
// method will only reflect keys that were decoded. Namely, any keys hidden
|
||||
// behind a Primitive will be considered undecoded. Executing this method will
|
||||
// update the undecoded keys in the meta data. (See the example.)
|
||||
// Meta data for primitive values is included in the meta data returned by the
|
||||
// Decode* functions with one exception: keys returned by the Undecoded method
|
||||
// will only reflect keys that were decoded. Namely, any keys hidden behind a
|
||||
// Primitive will be considered undecoded. Executing this method will update the
|
||||
// undecoded keys in the meta data. (See the example.)
|
||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md.context = primValue.context
|
||||
defer func() { md.context = nil }()
|
||||
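For readers unfamiliar with delayed decoding, here is a brief sketch of how Primitive and PrimitiveDecode, documented above, are typically used. The struct and key names are illustrative, not from skopeo or this diff.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type serverConf struct {
	Host string
	Port int
}

func main() {
	// Decode the table as an opaque Primitive first, then decode it "manually"
	// later with PrimitiveDecode, as the doc comment above describes.
	var raw struct {
		Server toml.Primitive
	}
	doc := "[server]\nhost = \"example.org\"\nport = 8080\n"
	md, err := toml.Decode(doc, &raw)
	if err != nil {
		panic(err)
	}

	var srv serverConf
	if err := md.PrimitiveDecode(raw.Server, &srv); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", srv) // {Host:example.org Port:8080}
}
```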
@@ -185,7 +193,7 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
// Special case. Look for a `Primitive` value.
|
||||
// TODO: #76 would make this superfluous after implemented.
|
||||
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
|
||||
if rv.Type() == primitiveType {
|
||||
// Save the undecoded data and the key context into the primitive
|
||||
// value.
|
||||
context := make(Key, len(md.context))
|
||||
@@ -197,17 +205,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Special case. Unmarshaler Interface support.
|
||||
if rv.CanAddr() {
|
||||
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
|
||||
return v.UnmarshalTOML(data)
|
||||
}
|
||||
rvi := rv.Interface()
|
||||
if v, ok := rvi.(Unmarshaler); ok {
|
||||
return v.UnmarshalTOML(data)
|
||||
}
|
||||
|
||||
// Special case. Look for a value satisfying the TextUnmarshaler interface.
|
||||
if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
|
||||
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
|
||||
return md.unifyText(data, v)
|
||||
}
|
||||
|
||||
// TODO:
|
||||
// The behavior here is incorrect whenever a Go type satisfies the
|
||||
// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
|
||||
@@ -218,7 +223,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
|
||||
k := rv.Kind()
|
||||
|
||||
// laziness
|
||||
if k >= reflect.Int && k <= reflect.Uint64 {
|
||||
return md.unifyInt(data, rv)
|
||||
}
|
||||
@@ -244,15 +248,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
case reflect.Bool:
|
||||
return md.unifyBool(data, rv)
|
||||
case reflect.Interface:
|
||||
// we only support empty interfaces.
|
||||
if rv.NumMethod() > 0 {
|
||||
return e("unsupported type %s", rv.Type())
|
||||
if rv.NumMethod() > 0 { // Only empty interfaces are supported.
|
||||
return md.e("unsupported type %s", rv.Type())
|
||||
}
|
||||
return md.unifyAnything(data, rv)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return md.unifyFloat64(data, rv)
|
||||
}
|
||||
return e("unsupported type %s", rv.Kind())
|
||||
return md.e("unsupported type %s", rv.Kind())
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
@@ -261,7 +264,7 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
if mapping == nil {
|
||||
return nil
|
||||
}
|
||||
return e("type mismatch for %s: expected table but found %T",
|
||||
return md.e("type mismatch for %s: expected table but found %T",
|
||||
rv.Type().String(), mapping)
|
||||
}
|
||||
|
||||
@@ -287,13 +290,14 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
if isUnifiable(subv) {
|
||||
md.decoded[md.context.add(key).String()] = struct{}{}
|
||||
md.context = append(md.context, key)
|
||||
|
||||
err := md.unify(datum, subv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
} else if f.name != "" {
|
||||
return e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
|
||||
return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -301,10 +305,10 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
||||
if k := rv.Type().Key().Kind(); k != reflect.String {
|
||||
return fmt.Errorf(
|
||||
"toml: cannot decode to a map with non-string key type (%s in %q)",
|
||||
k, rv.Type())
|
||||
keyType := rv.Type().Key().Kind()
|
||||
if keyType != reflect.String && keyType != reflect.Interface {
|
||||
return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
|
||||
keyType, rv.Type())
|
||||
}
|
||||
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
@@ -322,13 +326,22 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
||||
md.context = append(md.context, k)
|
||||
|
||||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
|
||||
if err := md.unify(v, rvval); err != nil {
|
||||
|
||||
err := md.unify(v, indirect(rvval))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
|
||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
||||
rvkey.SetString(k)
|
||||
|
||||
switch keyType {
|
||||
case reflect.Interface:
|
||||
rvkey.Set(reflect.ValueOf(k))
|
||||
case reflect.String:
|
||||
rvkey.SetString(k)
|
||||
}
|
||||
|
||||
rv.SetMapIndex(rvkey, rvval)
|
||||
}
|
||||
return nil
|
||||
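The hunks above extend unifyMap so that map keys may be interface{} values as well as strings. A minimal usage sketch follows; it assumes the newer vendored version with this change and is not code from the diff.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	// With the change above, a map with interface{} keys is accepted;
	// each TOML key is stored via reflect.ValueOf(k), i.e. as a string value.
	var m map[interface{}]interface{}
	doc := "answer = 42\n\n[owner]\nname = \"Tom\"\n"
	if _, err := toml.Decode(doc, &m); err != nil {
		panic(err)
	}
	fmt.Println(m["answer"], m["owner"])
}
```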
@@ -343,7 +356,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
return md.badtype("slice", data)
}
if l := datav.Len(); l != rv.Len() {
return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l)
}
return md.unifySliceArray(datav, rv)
}
@@ -376,6 +389,18 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
}

func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
_, ok := rv.Interface().(json.Number)
if ok {
if i, ok := data.(int64); ok {
rv.SetString(strconv.FormatInt(i, 10))
} else if f, ok := data.(float64); ok {
rv.SetString(strconv.FormatFloat(f, 'f', -1, 64))
} else {
return md.badtype("string", data)
}
return nil
}

if s, ok := data.(string); ok {
rv.SetString(s)
return nil
@@ -384,11 +409,13 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
}

func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
rvk := rv.Kind()

if num, ok := data.(float64); ok {
switch rv.Kind() {
switch rvk {
case reflect.Float32:
if num < -math.MaxFloat32 || num > math.MaxFloat32 {
return e("value %f is out of range for float32", num)
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
fallthrough
case reflect.Float64:
@@ -400,20 +427,11 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
}

if num, ok := data.(int64); ok {
switch rv.Kind() {
case reflect.Float32:
if num < -maxSafeFloat32Int || num > maxSafeFloat32Int {
return e("value %d is out of range for float32", num)
}
fallthrough
case reflect.Float64:
if num < -maxSafeFloat64Int || num > maxSafeFloat64Int {
return e("value %d is out of range for float64", num)
}
rv.SetFloat(float64(num))
default:
panic("bug")
if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
rv.SetFloat(float64(num))
return nil
}

@@ -421,50 +439,46 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
}

func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
switch rv.Kind() {
case reflect.Int, reflect.Int64:
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
return e("value %d is out of range for int8", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
return e("value %d is out of range for int16", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
return e("value %d is out of range for int32", num)
}
_, ok := rv.Interface().(time.Duration)
if ok {
// Parse as string duration, and fall back to regular integer parsing
// (as nanosecond) if this is not a string.
if s, ok := data.(string); ok {
dur, err := time.ParseDuration(s)
if err != nil {
return md.parseErr(errParseDuration{s})
}
rv.SetInt(num)
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
unum := uint64(num)
switch rv.Kind() {
case reflect.Uint, reflect.Uint64:
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
return e("value %d is out of range for uint8", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
return e("value %d is out of range for uint16", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
return e("value %d is out of range for uint32", num)
}
}
rv.SetUint(unum)
} else {
panic("unreachable")
rv.SetInt(int64(dur))
return nil
}
return nil
}
return md.badtype("integer", data)

num, ok := data.(int64)
if !ok {
return md.badtype("integer", data)
}

rvk := rv.Kind()
switch {
case rvk >= reflect.Int && rvk <= reflect.Int64:
if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) ||
(rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) ||
(rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
rv.SetInt(num)
case rvk >= reflect.Uint && rvk <= reflect.Uint64:
unum := uint64(num)
if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) ||
rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) ||
rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) {
return md.parseErr(errParseRange{i: num, size: rvk.String()})
}
rv.SetUint(unum)
default:
panic("unreachable")
}
return nil
}

func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
@@ -489,7 +503,7 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
return err
}
s = string(text)
case TextMarshaler:
case encoding.TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
@@ -515,7 +529,30 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
}

func (md *MetaData) badtype(dst string, data interface{}) error {
return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst)
return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst)
}

func (md *MetaData) parseErr(err error) error {
k := md.context.String()
return ParseError{
LastKey: k,
Position: md.keyInfo[k].pos,
Line: md.keyInfo[k].pos.Line,
err: err,
input: string(md.data),
}
}

func (md *MetaData) e(format string, args ...interface{}) error {
f := "toml: "
if len(md.context) > 0 {
f = fmt.Sprintf("toml: (last key %q): ", md.context)
p := md.keyInfo[md.context.String()].pos
if p.Line > 0 {
f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context)
}
}
return fmt.Errorf(f+format, args...)
}

// rvalue returns a reflect.Value of `v`. All pointers are resolved.
@@ -534,7 +571,11 @@ func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
pvi := pv.Interface()
if _, ok := pvi.(encoding.TextUnmarshaler); ok {
return pv
}
if _, ok := pvi.(Unmarshaler); ok {
return pv
}
}
@@ -550,12 +591,12 @@ func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
rvi := rv.Interface()
if _, ok := rvi.(encoding.TextUnmarshaler); ok {
return true
}
if _, ok := rvi.(Unmarshaler); ok {
return true
}
return false
}

func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
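The unifyInt hunk above shows the vendored toml library gaining support for decoding a TOML string into time.Duration, falling back to plain integer nanoseconds. A rough, hypothetical sketch of what that enables for code consuming this vendored copy (the Config struct, field, and input below are illustrative, not part of the diff):

    package main

    import (
        "fmt"
        "time"

        "github.com/BurntSushi/toml"
    )

    // Config is a hypothetical struct used only for illustration.
    type Config struct {
        Timeout time.Duration `toml:"timeout"`
    }

    func main() {
        var c Config
        // With this version, durations may be written as strings ("5m10s");
        // plain integers are still accepted and read as nanoseconds.
        if _, err := toml.Decode(`timeout = "5m10s"`, &c); err != nil {
            fmt.Println("decode error:", err)
            return
        }
        fmt.Println(c.Timeout) // 5m10s
    }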
4	vendor/github.com/BurntSushi/toml/decode_go116.go (generated, vendored)
@@ -7,8 +7,8 @@ import (
"io/fs"
)

// DecodeFS is just like Decode, except it will automatically read the contents
// of the file at `path` from a fs.FS instance.
// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
// [Decode].
func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
fp, err := fsys.Open(path)
if err != nil {
22	vendor/github.com/BurntSushi/toml/doc.go (generated, vendored)
@@ -1,13 +1,11 @@
/*
Package toml implements decoding and encoding of TOML files.

This package supports TOML v1.0.0, as listed on https://toml.io

There is also support for delaying decoding with the Primitive type, and
querying the set of keys in a TOML document with the MetaData type.

The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
and can be used to verify if TOML document is valid. It can also be used to
print the type of each key.
*/
// Package toml implements decoding and encoding of TOML files.
//
// This package supports TOML v1.0.0, as specified at https://toml.io
//
// There is also support for delaying decoding with the Primitive type, and
// querying the set of keys in a TOML document with the MetaData type.
//
// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
// and can be used to verify if TOML document is valid. It can also be used to
// print the type of each key.
package toml
244	vendor/github.com/BurntSushi/toml/encode.go (generated, vendored)
@@ -3,6 +3,7 @@ package toml
|
||||
import (
|
||||
"bufio"
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -63,6 +64,12 @@ var dblQuotedReplacer = strings.NewReplacer(
|
||||
"\x7f", `\u007f`,
|
||||
)
|
||||
|
||||
var (
|
||||
marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
|
||||
timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
|
||||
)
|
||||
|
||||
// Marshaler is the interface implemented by types that can marshal themselves
|
||||
// into valid TOML.
|
||||
type Marshaler interface {
|
||||
@@ -72,9 +79,12 @@ type Marshaler interface {
|
||||
// Encoder encodes a Go to a TOML document.
|
||||
//
|
||||
// The mapping between Go values and TOML values should be precisely the same as
|
||||
// for the Decode* functions.
|
||||
// for [Decode].
|
||||
//
|
||||
// The toml.Marshaler and encoder.TextMarshaler interfaces are supported to
|
||||
// time.Time is encoded as a RFC 3339 string, and time.Duration as its string
|
||||
// representation.
|
||||
//
|
||||
// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to
|
||||
// encoding the value as custom TOML.
|
||||
//
|
||||
// If you want to write arbitrary binary data then you will need to use
|
||||
@@ -85,6 +95,17 @@ type Marshaler interface {
|
||||
//
|
||||
// Go maps will be sorted alphabetically by key for deterministic output.
|
||||
//
|
||||
// The toml struct tag can be used to provide the key name; if omitted the
|
||||
// struct field name will be used. If the "omitempty" option is present the
|
||||
// following value will be skipped:
|
||||
//
|
||||
// - arrays, slices, maps, and string with len of 0
|
||||
// - struct with all zero values
|
||||
// - bool false
|
||||
//
|
||||
// If omitzero is given all int and float types with a value of 0 will be
|
||||
// skipped.
|
||||
//
|
||||
// Encoding Go values without a corresponding TOML representation will return an
|
||||
// error. Examples of this includes maps with non-string keys, slices with nil
|
||||
// elements, embedded non-struct types, and nested slices containing maps or
|
||||
@@ -109,7 +130,7 @@ func NewEncoder(w io.Writer) *Encoder {
|
||||
}
|
||||
}
|
||||
|
||||
// Encode writes a TOML representation of the Go value to the Encoder's writer.
|
||||
// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
|
||||
//
|
||||
// An error is returned if the value given cannot be encoded to a valid TOML
|
||||
// document.
|
||||
@@ -136,18 +157,15 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
|
||||
}
|
||||
|
||||
func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
||||
// Special case: time needs to be in ISO8601 format.
|
||||
//
|
||||
// Special case: if we can marshal the type to text, then we used that. This
|
||||
// prevents the encoder for handling these types as generic structs (or
|
||||
// whatever the underlying type of a TextMarshaler is).
|
||||
switch t := rv.Interface().(type) {
|
||||
case time.Time, encoding.TextMarshaler, Marshaler:
|
||||
// If we can marshal the type to text, then we use that. This prevents the
|
||||
// encoder for handling these types as generic structs (or whatever the
|
||||
// underlying type of a TextMarshaler is).
|
||||
switch {
|
||||
case isMarshaler(rv):
|
||||
enc.writeKeyValue(key, rv, false)
|
||||
return
|
||||
// TODO: #76 would make this superfluous after implemented.
|
||||
case Primitive:
|
||||
enc.encode(key, reflect.ValueOf(t.undecoded))
|
||||
case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented.
|
||||
enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -212,6 +230,9 @@ func (enc *Encoder) eElement(rv reflect.Value) {
|
||||
if err != nil {
|
||||
encPanic(err)
|
||||
}
|
||||
if s == nil {
|
||||
encPanic(errors.New("MarshalTOML returned nil and no error"))
|
||||
}
|
||||
enc.w.Write(s)
|
||||
return
|
||||
case encoding.TextMarshaler:
|
||||
@@ -219,11 +240,34 @@ func (enc *Encoder) eElement(rv reflect.Value) {
|
||||
if err != nil {
|
||||
encPanic(err)
|
||||
}
|
||||
if s == nil {
|
||||
encPanic(errors.New("MarshalText returned nil and no error"))
|
||||
}
|
||||
enc.writeQuoted(string(s))
|
||||
return
|
||||
case time.Duration:
|
||||
enc.writeQuoted(v.String())
|
||||
return
|
||||
case json.Number:
|
||||
n, _ := rv.Interface().(json.Number)
|
||||
|
||||
if n == "" { /// Useful zero value.
|
||||
enc.w.WriteByte('0')
|
||||
return
|
||||
} else if v, err := n.Int64(); err == nil {
|
||||
enc.eElement(reflect.ValueOf(v))
|
||||
return
|
||||
} else if v, err := n.Float64(); err == nil {
|
||||
enc.eElement(reflect.ValueOf(v))
|
||||
return
|
||||
}
|
||||
encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n))
|
||||
}
|
||||
|
||||
switch rv.Kind() {
|
||||
case reflect.Ptr:
|
||||
enc.eElement(rv.Elem())
|
||||
return
|
||||
case reflect.String:
|
||||
enc.writeQuoted(rv.String())
|
||||
case reflect.Bool:
|
||||
@@ -259,7 +303,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
|
||||
case reflect.Interface:
|
||||
enc.eElement(rv.Elem())
|
||||
default:
|
||||
encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
|
||||
encPanic(fmt.Errorf("unexpected type: %T", rv.Interface()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -280,7 +324,7 @@ func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
||||
length := rv.Len()
|
||||
enc.wf("[")
|
||||
for i := 0; i < length; i++ {
|
||||
elem := rv.Index(i)
|
||||
elem := eindirect(rv.Index(i))
|
||||
enc.eElement(elem)
|
||||
if i != length-1 {
|
||||
enc.wf(", ")
|
||||
@@ -294,7 +338,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
trv := rv.Index(i)
|
||||
trv := eindirect(rv.Index(i))
|
||||
if isNil(trv) {
|
||||
continue
|
||||
}
|
||||
@@ -319,7 +363,7 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) {
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
|
||||
switch rv := eindirect(rv); rv.Kind() {
|
||||
switch rv.Kind() {
|
||||
case reflect.Map:
|
||||
enc.eMap(key, rv, inline)
|
||||
case reflect.Struct:
|
||||
@@ -341,7 +385,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
||||
var mapKeysDirect, mapKeysSub []string
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) {
|
||||
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
@@ -351,7 +395,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
||||
var writeMapKeys = func(mapKeys []string, trailC bool) {
|
||||
sort.Strings(mapKeys)
|
||||
for i, mapKey := range mapKeys {
|
||||
val := rv.MapIndex(reflect.ValueOf(mapKey))
|
||||
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
|
||||
if isNil(val) {
|
||||
continue
|
||||
}
|
||||
@@ -379,6 +423,13 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
||||
|
||||
const is32Bit = (32 << (^uint(0) >> 63)) == 32
|
||||
|
||||
func pointerTo(t reflect.Type) reflect.Type {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
return pointerTo(t.Elem())
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
// Write keys for fields directly under this key first, because if we write
|
||||
// a field that creates a new table then all keys under it will be in that
|
||||
@@ -395,7 +446,8 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
|
||||
isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct
|
||||
if f.PkgPath != "" && !isEmbed { /// Skip unexported fields.
|
||||
continue
|
||||
}
|
||||
opts := getOptions(f.Tag)
|
||||
@@ -403,27 +455,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
continue
|
||||
}
|
||||
|
||||
frv := rv.Field(i)
|
||||
frv := eindirect(rv.Field(i))
|
||||
|
||||
// Treat anonymous struct fields with tag names as though they are
|
||||
// not anonymous, like encoding/json does.
|
||||
//
|
||||
// Non-struct anonymous fields use the normal encoding logic.
|
||||
if f.Anonymous {
|
||||
t := f.Type
|
||||
switch t.Kind() {
|
||||
case reflect.Struct:
|
||||
if getOptions(f.Tag).name == "" {
|
||||
addFields(t, frv, append(start, f.Index...))
|
||||
continue
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
|
||||
if !frv.IsNil() {
|
||||
addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
|
||||
}
|
||||
continue
|
||||
}
|
||||
if isEmbed {
|
||||
if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct {
|
||||
addFields(frv.Type(), frv, append(start, f.Index...))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
@@ -449,7 +490,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
writeFields := func(fields [][]int) {
|
||||
for _, fieldIndex := range fields {
|
||||
fieldType := rt.FieldByIndex(fieldIndex)
|
||||
fieldVal := rv.FieldByIndex(fieldIndex)
|
||||
fieldVal := eindirect(rv.FieldByIndex(fieldIndex))
|
||||
|
||||
if isNil(fieldVal) { /// Don't write anything for nil fields.
|
||||
continue
|
||||
@@ -463,7 +504,8 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
if opts.name != "" {
|
||||
keyName = opts.name
|
||||
}
|
||||
if opts.omitempty && isEmpty(fieldVal) {
|
||||
|
||||
if opts.omitempty && enc.isEmpty(fieldVal) {
|
||||
continue
|
||||
}
|
||||
if opts.omitzero && isZero(fieldVal) {
|
||||
@@ -502,6 +544,21 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if rv.Kind() == reflect.Struct {
|
||||
if rv.Type() == timeType {
|
||||
return tomlDatetime
|
||||
}
|
||||
if isMarshaler(rv) {
|
||||
return tomlString
|
||||
}
|
||||
return tomlHash
|
||||
}
|
||||
|
||||
if isMarshaler(rv) {
|
||||
return tomlString
|
||||
}
|
||||
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
return tomlBool
|
||||
@@ -513,7 +570,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return tomlFloat
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlHash, tomlArrayType(rv)) {
|
||||
if isTableArray(rv) {
|
||||
return tomlArrayHash
|
||||
}
|
||||
return tomlArray
|
||||
@@ -523,67 +580,35 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
|
||||
return tomlString
|
||||
case reflect.Map:
|
||||
return tomlHash
|
||||
case reflect.Struct:
|
||||
if _, ok := rv.Interface().(time.Time); ok {
|
||||
return tomlDatetime
|
||||
}
|
||||
if isMarshaler(rv) {
|
||||
return tomlString
|
||||
}
|
||||
return tomlHash
|
||||
default:
|
||||
if isMarshaler(rv) {
|
||||
return tomlString
|
||||
}
|
||||
|
||||
encPanic(errors.New("unsupported type: " + rv.Kind().String()))
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
|
||||
func isMarshaler(rv reflect.Value) bool {
|
||||
switch rv.Interface().(type) {
|
||||
case encoding.TextMarshaler:
|
||||
return true
|
||||
case Marshaler:
|
||||
return true
|
||||
}
|
||||
|
||||
// Someone used a pointer receiver: we can make it work for pointer values.
|
||||
if rv.CanAddr() {
|
||||
if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := rv.Addr().Interface().(Marshaler); ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml)
|
||||
}
|
||||
|
||||
// tomlArrayType returns the element type of a TOML array. The type returned
|
||||
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
|
||||
// slize). This function may also panic if it finds a type that cannot be
|
||||
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
|
||||
// nested arrays of tables).
|
||||
func tomlArrayType(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
|
||||
return nil
|
||||
// isTableArray reports if all entries in the array or slice are a table.
|
||||
func isTableArray(arr reflect.Value) bool {
|
||||
if isNil(arr) || !arr.IsValid() || arr.Len() == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
/// Don't allow nil.
|
||||
rvlen := rv.Len()
|
||||
for i := 1; i < rvlen; i++ {
|
||||
if tomlTypeOfGo(rv.Index(i)) == nil {
|
||||
ret := true
|
||||
for i := 0; i < arr.Len(); i++ {
|
||||
tt := tomlTypeOfGo(eindirect(arr.Index(i)))
|
||||
// Don't allow nil.
|
||||
if tt == nil {
|
||||
encPanic(errArrayNilElement)
|
||||
}
|
||||
}
|
||||
|
||||
firstType := tomlTypeOfGo(rv.Index(0))
|
||||
if firstType == nil {
|
||||
encPanic(errArrayNilElement)
|
||||
if ret && !typeEqual(tomlHash, tt) {
|
||||
ret = false
|
||||
}
|
||||
}
|
||||
return firstType
|
||||
return ret
|
||||
}
|
||||
|
||||
type tagOptions struct {
|
||||
@@ -624,10 +649,26 @@ func isZero(rv reflect.Value) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func isEmpty(rv reflect.Value) bool {
|
||||
func (enc *Encoder) isEmpty(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
||||
return rv.Len() == 0
|
||||
case reflect.Struct:
|
||||
if rv.Type().Comparable() {
|
||||
return reflect.Zero(rv.Type()).Interface() == rv.Interface()
|
||||
}
|
||||
// Need to also check if all the fields are empty, otherwise something
|
||||
// like this with uncomparable types will always return true:
|
||||
//
|
||||
// type a struct{ field b }
|
||||
// type b struct{ s []string }
|
||||
// s := a{field: b{s: []string{"AAA"}}}
|
||||
for i := 0; i < rv.NumField(); i++ {
|
||||
if !enc.isEmpty(rv.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Bool:
|
||||
return !rv.Bool()
|
||||
}
|
||||
@@ -642,16 +683,15 @@ func (enc *Encoder) newline() {
|
||||
|
||||
// Write a key/value pair:
|
||||
//
|
||||
// key = <any value>
|
||||
// key = <any value>
|
||||
//
|
||||
// This is also used for "k = v" in inline tables; so something like this will
|
||||
// be written in three calls:
|
||||
//
|
||||
// ┌────────────────────┐
|
||||
// │ ┌───┐ ┌─────┐│
|
||||
// v v v v vv
|
||||
// key = {k = v, k2 = v2}
|
||||
//
|
||||
// ┌───────────────────┐
|
||||
// │ ┌───┐ ┌────┐│
|
||||
// v v v v vv
|
||||
// key = {k = 1, k2 = 2}
|
||||
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
@@ -679,13 +719,25 @@ func encPanic(err error) {
|
||||
panic(tomlEncodeError{err})
|
||||
}
|
||||
|
||||
// Resolve any level of pointers to the actual value (e.g. **string → string).
|
||||
func eindirect(v reflect.Value) reflect.Value {
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return eindirect(v.Elem())
|
||||
default:
|
||||
if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
|
||||
if isMarshaler(v) {
|
||||
return v
|
||||
}
|
||||
if v.CanAddr() { /// Special case for marshalers; see #358.
|
||||
if pv := v.Addr(); isMarshaler(pv) {
|
||||
return pv
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
if v.IsNil() {
|
||||
return v
|
||||
}
|
||||
|
||||
return eindirect(v.Elem())
|
||||
}
|
||||
|
||||
func isNil(rv reflect.Value) bool {
|
||||
|
||||
120	vendor/github.com/BurntSushi/toml/error.go (generated, vendored)
@@ -5,57 +5,60 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ParseError is returned when there is an error parsing the TOML syntax.
|
||||
//
|
||||
// For example invalid syntax, duplicate keys, etc.
|
||||
// ParseError is returned when there is an error parsing the TOML syntax such as
|
||||
// invalid syntax, duplicate keys, etc.
|
||||
//
|
||||
// In addition to the error message itself, you can also print detailed location
|
||||
// information with context by using ErrorWithPosition():
|
||||
// information with context by using [ErrorWithPosition]:
|
||||
//
|
||||
// toml: error: Key 'fruit' was already created and cannot be used as an array.
|
||||
// toml: error: Key 'fruit' was already created and cannot be used as an array.
|
||||
//
|
||||
// At line 4, column 2-7:
|
||||
// At line 4, column 2-7:
|
||||
//
|
||||
// 2 | fruit = []
|
||||
// 3 |
|
||||
// 4 | [[fruit]] # Not allowed
|
||||
// ^^^^^
|
||||
// 2 | fruit = []
|
||||
// 3 |
|
||||
// 4 | [[fruit]] # Not allowed
|
||||
// ^^^^^
|
||||
//
|
||||
// Furthermore, the ErrorWithUsage() can be used to print the above with some
|
||||
// more detailed usage guidance:
|
||||
// [ErrorWithUsage] can be used to print the above with some more detailed usage
|
||||
// guidance:
|
||||
//
|
||||
// toml: error: newlines not allowed within inline tables
|
||||
// toml: error: newlines not allowed within inline tables
|
||||
//
|
||||
// At line 1, column 18:
|
||||
// At line 1, column 18:
|
||||
//
|
||||
// 1 | x = [{ key = 42 #
|
||||
// ^
|
||||
// 1 | x = [{ key = 42 #
|
||||
// ^
|
||||
//
|
||||
// Error help:
|
||||
// Error help:
|
||||
//
|
||||
// Inline tables must always be on a single line:
|
||||
// Inline tables must always be on a single line:
|
||||
//
|
||||
// table = {key = 42, second = 43}
|
||||
// table = {key = 42, second = 43}
|
||||
//
|
||||
// It is invalid to split them over multiple lines like so:
|
||||
// It is invalid to split them over multiple lines like so:
|
||||
//
|
||||
// # INVALID
|
||||
// table = {
|
||||
// key = 42,
|
||||
// second = 43
|
||||
// }
|
||||
// # INVALID
|
||||
// table = {
|
||||
// key = 42,
|
||||
// second = 43
|
||||
// }
|
||||
//
|
||||
// Use regular for this:
|
||||
// Use regular for this:
|
||||
//
|
||||
// [table]
|
||||
// key = 42
|
||||
// second = 43
|
||||
// [table]
|
||||
// key = 42
|
||||
// second = 43
|
||||
type ParseError struct {
|
||||
Message string // Short technical message.
|
||||
Usage string // Longer message with usage guidance; may be blank.
|
||||
Position Position // Position of the error
|
||||
LastKey string // Last parsed key, may be blank.
|
||||
Line int // Line the error occurred. Deprecated: use Position.
|
||||
|
||||
// Line the error occurred.
|
||||
//
|
||||
// Deprecated: use [Position].
|
||||
Line int
|
||||
|
||||
err error
|
||||
input string
|
||||
@@ -83,7 +86,7 @@ func (pe ParseError) Error() string {
|
||||
|
||||
// ErrorWithUsage() returns the error with detailed location context.
|
||||
//
|
||||
// See the documentation on ParseError.
|
||||
// See the documentation on [ParseError].
|
||||
func (pe ParseError) ErrorWithPosition() string {
|
||||
if pe.input == "" { // Should never happen, but just in case.
|
||||
return pe.Error()
|
||||
@@ -124,13 +127,17 @@ func (pe ParseError) ErrorWithPosition() string {
|
||||
// ErrorWithUsage() returns the error with detailed location context and usage
|
||||
// guidance.
|
||||
//
|
||||
// See the documentation on ParseError.
|
||||
// See the documentation on [ParseError].
|
||||
func (pe ParseError) ErrorWithUsage() string {
|
||||
m := pe.ErrorWithPosition()
|
||||
if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
|
||||
return m + "Error help:\n\n " +
|
||||
strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n ") +
|
||||
"\n"
|
||||
lines := strings.Split(strings.TrimSpace(u.Usage()), "\n")
|
||||
for i := range lines {
|
||||
if lines[i] != "" {
|
||||
lines[i] = " " + lines[i]
|
||||
}
|
||||
}
|
||||
return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n"
|
||||
}
|
||||
return m
|
||||
}
|
||||
@@ -160,6 +167,11 @@ type (
|
||||
errLexInvalidDate struct{ v string }
|
||||
errLexInlineTableNL struct{}
|
||||
errLexStringNL struct{}
|
||||
errParseRange struct {
|
||||
i interface{} // int or float
|
||||
size string // "int64", "uint16", etc.
|
||||
}
|
||||
errParseDuration struct{ d string }
|
||||
)
|
||||
|
||||
func (e errLexControl) Error() string {
|
||||
@@ -179,6 +191,10 @@ func (e errLexInlineTableNL) Error() string { return "newlines not allowed withi
|
||||
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
|
||||
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
|
||||
func (e errLexStringNL) Usage() string { return usageStringNewline }
|
||||
func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
|
||||
func (e errParseRange) Usage() string { return usageIntOverflow }
|
||||
func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
|
||||
func (e errParseDuration) Usage() string { return usageDuration }
|
||||
|
||||
const usageEscape = `
|
||||
A '\' inside a "-delimited string is interpreted as an escape character.
|
||||
@@ -227,3 +243,37 @@ Instead use """ or ''' to split strings over multiple lines:
|
||||
string = """Hello,
|
||||
world!"""
|
||||
`
|
||||
|
||||
const usageIntOverflow = `
|
||||
This number is too large; this may be an error in the TOML, but it can also be a
|
||||
bug in the program that uses too small of an integer.
|
||||
|
||||
The maximum and minimum values are:
|
||||
|
||||
size │ lowest │ highest
|
||||
───────┼────────────────┼──────────
|
||||
int8 │ -128 │ 127
|
||||
int16 │ -32,768 │ 32,767
|
||||
int32 │ -2,147,483,648 │ 2,147,483,647
|
||||
int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷
|
||||
uint8 │ 0 │ 255
|
||||
uint16 │ 0 │ 65535
|
||||
uint32 │ 0 │ 4294967295
|
||||
uint64 │ 0 │ 1.8 × 10¹⁸
|
||||
|
||||
int refers to int32 on 32-bit systems and int64 on 64-bit systems.
|
||||
`
|
||||
|
||||
const usageDuration = `
|
||||
A duration must be as "number<unit>", without any spaces. Valid units are:
|
||||
|
||||
ns nanoseconds (billionth of a second)
|
||||
us, µs microseconds (millionth of a second)
|
||||
ms milliseconds (thousands of a second)
|
||||
s seconds
|
||||
m minutes
|
||||
h hours
|
||||
|
||||
You can combine multiple units; for example "5m10s" for 5 minutes and 10
|
||||
seconds.
|
||||
`
|
||||
|
||||
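The error.go changes above document ParseError and its ErrorWithPosition/ErrorWithUsage helpers. A minimal sketch of how a caller might surface that positional context, assuming this vendored toml version (the input string and destination below are only examples):

    package main

    import (
        "errors"
        "fmt"

        "github.com/BurntSushi/toml"
    )

    func main() {
        var dst map[string]interface{}
        // Invalid TOML: inline tables may not span multiple lines.
        _, err := toml.Decode("x = [{ key = 42\n", &dst)
        if err != nil {
            var pErr toml.ParseError
            if errors.As(err, &pErr) {
                // Print the error together with line/column context.
                fmt.Println(pErr.ErrorWithPosition())
                return
            }
            fmt.Println(err)
        }
    }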
19	vendor/github.com/BurntSushi/toml/lex.go (generated, vendored)
@@ -82,7 +82,7 @@ func (lx *lexer) nextItem() item {
return item
default:
lx.state = lx.state(lx)
//fmt.Printf("  STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack)
//fmt.Printf("  STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack)
}
}
}
@@ -716,7 +716,17 @@ func lexMultilineString(lx *lexer) stateFn {
if lx.peek() == '"' {
/// Check if we already lexed 5 's; if so we have 6 now, and
/// that's just too many man!
if strings.HasSuffix(lx.current(), `"""""`) {
///
/// Second check is for the edge case:
///
/// two quotes allowed.
/// vv
/// """lol \""""""
/// ^^ ^^^---- closing three
/// escaped
///
/// But ugly, but it works
if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) {
return lx.errorf(`unexpected '""""""'`)
}
lx.backup()
@@ -761,7 +771,7 @@ func lexRawString(lx *lexer) stateFn {
}

// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// a string. It assumes that the beginning ''' has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
r := lx.next()
@@ -807,8 +817,7 @@ func lexMultilineRawString(lx *lexer) stateFn {
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first:
if isNL(lx.next()) {
if isNL(lx.next()) { /// \ escaping newline.
return lexMultilineString
}
lx.backup()
11	vendor/github.com/BurntSushi/toml/meta.go (generated, vendored)
@@ -12,10 +12,11 @@ import (
type MetaData struct {
context Key // Used only during decoding.

keyInfo map[string]keyInfo
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]struct{}
data []byte // Input file; for errors.
}

// IsDefined reports if the key exists in the TOML data.
@@ -50,8 +51,8 @@ func (md *MetaData) IsDefined(key ...string) bool {
// Type will return the empty string if given an empty key or a key that does
// not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
if typ, ok := md.types[Key(key).String()]; ok {
return typ.typeString()
if ki, ok := md.keyInfo[Key(key).String()]; ok {
return ki.tomlType.typeString()
}
return ""
}
@@ -70,7 +71,7 @@ func (md *MetaData) Keys() []Key {
// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// This includes keys that haven't been decoded because of a [Primitive] value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
@@ -88,7 +89,7 @@ func (md *MetaData) Undecoded() []Key {
return undecoded
}

// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get
// values of this type.
type Key []string
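The meta.go hunk above folds the per-key type map into keyInfo while leaving the public MetaData API (IsDefined, Type, Undecoded) unchanged. A minimal, illustrative sketch of that API, assuming this vendored toml version; the document and struct below are made up for the example:

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    func main() {
        var dst struct {
            Host string `toml:"host"`
        }
        doc := "host = \"example.com\"\nport = 8080\n"
        md, err := toml.Decode(doc, &dst)
        if err != nil {
            fmt.Println("decode error:", err)
            return
        }
        fmt.Println(md.IsDefined("host")) // true
        fmt.Println(md.Type("port"))      // "Integer"
        fmt.Println(md.Undecoded())       // [port]: no matching field in dst
    }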
54	vendor/github.com/BurntSushi/toml/parse.go (generated, vendored)
@@ -16,12 +16,18 @@ type parser struct {
|
||||
currentKey string // Base key name for everything except hashes.
|
||||
pos Position // Current position in the TOML file.
|
||||
|
||||
ordered []Key // List of keys in the order that they appear in the TOML data.
|
||||
ordered []Key // List of keys in the order that they appear in the TOML data.
|
||||
|
||||
keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
|
||||
mapping map[string]interface{} // Map keyname → key value.
|
||||
types map[string]tomlType // Map keyname → TOML type.
|
||||
implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
|
||||
}
|
||||
|
||||
type keyInfo struct {
|
||||
pos Position
|
||||
tomlType tomlType
|
||||
}
|
||||
|
||||
func parse(data string) (p *parser, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
@@ -57,8 +63,8 @@ func parse(data string) (p *parser, err error) {
|
||||
}
|
||||
|
||||
p = &parser{
|
||||
keyInfo: make(map[string]keyInfo),
|
||||
mapping: make(map[string]interface{}),
|
||||
types: make(map[string]tomlType),
|
||||
lx: lex(data),
|
||||
ordered: make([]Key, 0),
|
||||
implicits: make(map[string]struct{}),
|
||||
@@ -74,6 +80,15 @@ func parse(data string) (p *parser, err error) {
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (p *parser) panicErr(it item, err error) {
|
||||
panic(ParseError{
|
||||
err: err,
|
||||
Position: it.pos,
|
||||
Line: it.pos.Len,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
}
|
||||
|
||||
func (p *parser) panicItemf(it item, format string, v ...interface{}) {
|
||||
panic(ParseError{
|
||||
Message: fmt.Sprintf(format, v...),
|
||||
@@ -94,7 +109,7 @@ func (p *parser) panicf(format string, v ...interface{}) {
|
||||
|
||||
func (p *parser) next() item {
|
||||
it := p.lx.nextItem()
|
||||
//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
|
||||
//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val)
|
||||
if it.typ == itemError {
|
||||
if it.err != nil {
|
||||
panic(ParseError{
|
||||
@@ -146,7 +161,7 @@ func (p *parser) topLevel(item item) {
|
||||
p.assertEqual(itemTableEnd, name.typ)
|
||||
|
||||
p.addContext(key, false)
|
||||
p.setType("", tomlHash)
|
||||
p.setType("", tomlHash, item.pos)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemArrayTableStart: // [[ .. ]]
|
||||
name := p.nextPos()
|
||||
@@ -158,7 +173,7 @@ func (p *parser) topLevel(item item) {
|
||||
p.assertEqual(itemArrayTableEnd, name.typ)
|
||||
|
||||
p.addContext(key, true)
|
||||
p.setType("", tomlArrayHash)
|
||||
p.setType("", tomlArrayHash, item.pos)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemKeyStart: // key = ..
|
||||
outerContext := p.context
|
||||
@@ -181,8 +196,9 @@ func (p *parser) topLevel(item item) {
|
||||
}
|
||||
|
||||
/// Set value.
|
||||
val, typ := p.value(p.next(), false)
|
||||
p.set(p.currentKey, val, typ)
|
||||
vItem := p.next()
|
||||
val, typ := p.value(vItem, false)
|
||||
p.set(p.currentKey, val, typ, vItem.pos)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
|
||||
/// Remove the context we added (preserving any context from [tbl] lines).
|
||||
@@ -266,7 +282,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) {
|
||||
// So mark the former as a bug but the latter as a legitimate user
|
||||
// error.
|
||||
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
|
||||
p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val)
|
||||
p.panicErr(it, errParseRange{i: it.val, size: "int64"})
|
||||
} else {
|
||||
p.bug("Expected integer value, but got '%s'.", it.val)
|
||||
}
|
||||
@@ -304,7 +320,7 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
|
||||
num, err := strconv.ParseFloat(val, 64)
|
||||
if err != nil {
|
||||
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
|
||||
p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
|
||||
p.panicErr(it, errParseRange{i: it.val, size: "float64"})
|
||||
} else {
|
||||
p.panicItemf(it, "Invalid float value: %q", it.val)
|
||||
}
|
||||
@@ -343,9 +359,8 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
|
||||
}
|
||||
|
||||
func (p *parser) valueArray(it item) (interface{}, tomlType) {
|
||||
p.setType(p.currentKey, tomlArray)
|
||||
p.setType(p.currentKey, tomlArray, it.pos)
|
||||
|
||||
// p.setType(p.currentKey, typ)
|
||||
var (
|
||||
types []tomlType
|
||||
|
||||
@@ -414,7 +429,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
|
||||
|
||||
/// Set the value.
|
||||
val, typ := p.value(p.next(), false)
|
||||
p.set(p.currentKey, val, typ)
|
||||
p.set(p.currentKey, val, typ, it.pos)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
hash[p.currentKey] = val
|
||||
|
||||
@@ -533,9 +548,10 @@ func (p *parser) addContext(key Key, array bool) {
|
||||
}
|
||||
|
||||
// set calls setValue and setType.
|
||||
func (p *parser) set(key string, val interface{}, typ tomlType) {
|
||||
func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
|
||||
p.setValue(key, val)
|
||||
p.setType(key, typ)
|
||||
p.setType(key, typ, pos)
|
||||
|
||||
}
|
||||
|
||||
// setValue sets the given key to the given value in the current context.
|
||||
@@ -599,7 +615,7 @@ func (p *parser) setValue(key string, value interface{}) {
|
||||
//
|
||||
// Note that if `key` is empty, then the type given will be applied to the
|
||||
// current context (which is either a table or an array of tables).
|
||||
func (p *parser) setType(key string, typ tomlType) {
|
||||
func (p *parser) setType(key string, typ tomlType, pos Position) {
|
||||
keyContext := make(Key, 0, len(p.context)+1)
|
||||
keyContext = append(keyContext, p.context...)
|
||||
if len(key) > 0 { // allow type setting for hashes
|
||||
@@ -611,7 +627,7 @@ func (p *parser) setType(key string, typ tomlType) {
|
||||
if len(keyContext) == 0 {
|
||||
keyContext = Key{""}
|
||||
}
|
||||
p.types[keyContext.String()] = typ
|
||||
p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos}
|
||||
}
|
||||
|
||||
// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
|
||||
@@ -619,7 +635,7 @@ func (p *parser) setType(key string, typ tomlType) {
|
||||
func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
|
||||
func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
|
||||
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
|
||||
func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray }
|
||||
func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
|
||||
func (p *parser) addImplicitContext(key Key) {
|
||||
p.addImplicit(key)
|
||||
p.addContext(key, false)
|
||||
@@ -710,10 +726,8 @@ func (p *parser) replaceEscapes(it item, str string) string {
|
||||
switch s[r] {
|
||||
default:
|
||||
p.bug("Expected valid escape code after \\, but got %q.", s[r])
|
||||
return ""
|
||||
case ' ', '\t':
|
||||
p.panicItemf(it, "invalid escape: '\\%c'", s[r])
|
||||
return ""
|
||||
case 'b':
|
||||
replaced = append(replaced, rune(0x0008))
|
||||
r += 1
|
||||
|
||||
1	vendor/github.com/Microsoft/go-winio/.gitattributes (generated, vendored, new file)
@@ -0,0 +1 @@
* text=auto eol=lf
9	vendor/github.com/Microsoft/go-winio/.gitignore (generated, vendored)
@@ -1 +1,10 @@
.vscode/

*.exe

# testing
testdata

# go workspaces
go.work
go.work.sum
144	vendor/github.com/Microsoft/go-winio/.golangci.yml (generated, vendored, new file)
@@ -0,0 +1,144 @@
|
||||
run:
|
||||
skip-dirs:
|
||||
- pkg/etw/sample
|
||||
|
||||
linters:
|
||||
enable:
|
||||
# style
|
||||
- containedctx # struct contains a context
|
||||
- dupl # duplicate code
|
||||
- errname # erorrs are named correctly
|
||||
- goconst # strings that should be constants
|
||||
- godot # comments end in a period
|
||||
- misspell
|
||||
- nolintlint # "//nolint" directives are properly explained
|
||||
- revive # golint replacement
|
||||
- stylecheck # golint replacement, less configurable than revive
|
||||
- unconvert # unnecessary conversions
|
||||
- wastedassign
|
||||
|
||||
# bugs, performance, unused, etc ...
|
||||
- contextcheck # function uses a non-inherited context
|
||||
- errorlint # errors not wrapped for 1.13
|
||||
- exhaustive # check exhaustiveness of enum switch statements
|
||||
- gofmt # files are gofmt'ed
|
||||
- gosec # security
|
||||
- nestif # deeply nested ifs
|
||||
- nilerr # returns nil even with non-nil error
|
||||
- prealloc # slices that can be pre-allocated
|
||||
- structcheck # unused struct fields
|
||||
- unparam # unused function params
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
# err is very often shadowed in nested scopes
|
||||
- linters:
|
||||
- govet
|
||||
text: '^shadow: declaration of "err" shadows declaration'
|
||||
|
||||
# ignore long lines for skip autogen directives
|
||||
- linters:
|
||||
- revive
|
||||
text: "^line-length-limit: "
|
||||
source: "^//(go:generate|sys) "
|
||||
|
||||
# allow unjustified ignores of error checks in defer statements
|
||||
- linters:
|
||||
- nolintlint
|
||||
text: "^directive `//nolint:errcheck` should provide explanation"
|
||||
source: '^\s*defer '
|
||||
|
||||
# allow unjustified ignores of error lints for io.EOF
|
||||
- linters:
|
||||
- nolintlint
|
||||
text: "^directive `//nolint:errorlint` should provide explanation"
|
||||
source: '[=|!]= io.EOF'
|
||||
|
||||
|
||||
linters-settings:
|
||||
govet:
|
||||
enable-all: true
|
||||
disable:
|
||||
# struct order is often for Win32 compat
|
||||
# also, ignore pointer bytes/GC issues for now until performance becomes an issue
|
||||
- fieldalignment
|
||||
check-shadowing: true
|
||||
nolintlint:
|
||||
allow-leading-space: false
|
||||
require-explanation: true
|
||||
require-specific: true
|
||||
revive:
|
||||
# revive is more configurable than static check, so likely the preferred alternative to static-check
|
||||
# (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997)
|
||||
enable-all-rules:
|
||||
true
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
|
||||
rules:
|
||||
# rules with required arguments
|
||||
- name: argument-limit
|
||||
disabled: true
|
||||
- name: banned-characters
|
||||
disabled: true
|
||||
- name: cognitive-complexity
|
||||
disabled: true
|
||||
- name: cyclomatic
|
||||
disabled: true
|
||||
- name: file-header
|
||||
disabled: true
|
||||
- name: function-length
|
||||
disabled: true
|
||||
- name: function-result-limit
|
||||
disabled: true
|
||||
- name: max-public-structs
|
||||
disabled: true
|
||||
# geneally annoying rules
|
||||
- name: add-constant # complains about any and all strings and integers
|
||||
disabled: true
|
||||
- name: confusing-naming # we frequently use "Foo()" and "foo()" together
|
||||
disabled: true
|
||||
- name: flag-parameter # excessive, and a common idiom we use
|
||||
disabled: true
|
||||
# general config
|
||||
- name: line-length-limit
|
||||
arguments:
|
||||
- 140
|
||||
- name: var-naming
|
||||
arguments:
|
||||
- []
|
||||
- - CID
|
||||
- CRI
|
||||
- CTRD
|
||||
- DACL
|
||||
- DLL
|
||||
- DOS
|
||||
- ETW
|
||||
- FSCTL
|
||||
- GCS
|
||||
- GMSA
|
||||
- HCS
|
||||
- HV
|
||||
- IO
|
||||
- LCOW
|
||||
- LDAP
|
||||
- LPAC
|
||||
- LTSC
|
||||
- MMIO
|
||||
- NT
|
||||
- OCI
|
||||
- PMEM
|
||||
- PWSH
|
||||
- RX
|
||||
- SACl
|
||||
- SID
|
||||
- SMB
|
||||
- TX
|
||||
- VHD
|
||||
- VHDX
|
||||
- VMID
|
||||
- VPCI
|
||||
- WCOW
|
||||
- WIM
|
||||
stylecheck:
|
||||
checks:
|
||||
- "all"
|
||||
- "-ST1003" # use revive's var naming
|
||||
74	vendor/github.com/Microsoft/go-winio/README.md (generated, vendored)
@@ -13,16 +13,60 @@ Please see the LICENSE file for licensing information.
|
||||
|
||||
## Contributing
|
||||
|
||||
This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA)
|
||||
declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
|
||||
This project welcomes contributions and suggestions.
|
||||
Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that
|
||||
you have the right to, and actually do, grant us the rights to use your contribution.
|
||||
For details, visit [Microsoft CLA](https://cla.microsoft.com).
|
||||
|
||||
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR
|
||||
appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
|
||||
When you submit a pull request, a CLA-bot will automatically determine whether you need to
|
||||
provide a CLA and decorate the PR appropriately (e.g., label, comment).
|
||||
Simply follow the instructions provided by the bot.
|
||||
You will only need to do this once across all repos using our CLA.
|
||||
|
||||
We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves
|
||||
or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can
|
||||
attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
|
||||
Additionally, the pull request pipeline requires the following steps to be performed before
|
||||
mergining.
|
||||
|
||||
### Code Sign-Off
|
||||
|
||||
We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
|
||||
to certify they either authored the work themselves or otherwise have permission to use it in this project.
|
||||
|
||||
A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
|
||||
|
||||
Please see [the developer certificate](https://developercertificate.org) for more info,
|
||||
as well as to make sure that you can attest to the rules listed.
|
||||
Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
|
||||
|
||||
### Linting
|
||||
|
||||
Code must pass a linting stage, which uses [`golangci-lint`][lint].
|
||||
The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run
|
||||
automatically with VSCode by adding the following to your workspace or folder settings:
|
||||
|
||||
```json
|
||||
"go.lintTool": "golangci-lint",
|
||||
"go.lintOnSave": "package",
|
||||
```
|
||||
|
||||
Additional editor [integrations options are also available][lint-ide].
|
||||
|
||||
Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root:
|
||||
|
||||
```shell
|
||||
# use . or specify a path to only lint a package
|
||||
# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
|
||||
> golangci-lint run ./...
|
||||
```
|
||||
|
||||
### Go Generate
|
||||
|
||||
The pipeline checks that auto-generated code, via `go generate`, are up to date.
|
||||
|
||||
This can be done for the entire repo:
|
||||
|
||||
```shell
|
||||
> go generate ./...
|
||||
```
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
@@ -30,8 +74,16 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope
|
||||
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
|
||||
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
||||
|
||||
|
||||
|
||||
## Special Thanks
|
||||
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
|
||||
for another named pipe implementation.
|
||||
|
||||
Thanks to [natefinch][natefinch] for the inspiration for this library.
|
||||
See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation.
|
||||
|
||||
[lint]: https://golangci-lint.run/
|
||||
[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration
|
||||
[lint-install]: https://golangci-lint.run/usage/install/#local-installation
|
||||
|
||||
[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s
|
||||
[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff
|
||||
|
||||
[natefinch]: https://github.com/natefinch
|
||||
|
||||
41	vendor/github.com/Microsoft/go-winio/SECURITY.md (generated, vendored, new file)
@@ -0,0 +1,41 @@
|
||||
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->
|
||||
|
||||
## Security
|
||||
|
||||
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
|
||||
|
||||
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
|
||||
|
||||
## Reporting Security Issues
|
||||
|
||||
**Please do not report security vulnerabilities through public GitHub issues.**
|
||||
|
||||
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
|
||||
|
||||
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
|
||||
|
||||
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
|
||||
|
||||
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
|
||||
|
||||
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
|
||||
* Full paths of source file(s) related to the manifestation of the issue
|
||||
* The location of the affected source code (tag/branch/commit or direct URL)
|
||||
* Any special configuration required to reproduce the issue
|
||||
* Step-by-step instructions to reproduce the issue
|
||||
* Proof-of-concept or exploit code (if possible)
|
||||
* Impact of the issue, including how an attacker might exploit the issue
|
||||
|
||||
This information will help us triage your report more quickly.
|
||||
|
||||
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
|
||||
|
||||
## Preferred Languages
|
||||
|
||||
We prefer all communications to be in English.
|
||||
|
||||
## Policy
|
||||
|
||||
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
|
||||
|
||||
<!-- END MICROSOFT SECURITY.MD BLOCK -->
|
||||
48	vendor/github.com/Microsoft/go-winio/backup.go (generated, vendored)
@@ -1,3 +1,4 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
@@ -7,11 +8,12 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
|
||||
@@ -24,7 +26,7 @@ const (
|
||||
BackupAlternateData
|
||||
BackupLink
|
||||
BackupPropertyData
|
||||
BackupObjectId
|
||||
BackupObjectId //revive:disable-line:var-naming ID, not Id
|
||||
BackupReparseData
|
||||
BackupSparseBlock
|
||||
BackupTxfsData
|
||||
@@ -34,14 +36,16 @@ const (
|
||||
StreamSparseAttributes = uint32(8)
|
||||
)
|
||||
|
||||
//nolint:revive // var-naming: ALL_CAPS
|
||||
const (
|
||||
WRITE_DAC = 0x40000
|
||||
WRITE_OWNER = 0x80000
|
||||
ACCESS_SYSTEM_SECURITY = 0x1000000
|
||||
WRITE_DAC = windows.WRITE_DAC
|
||||
WRITE_OWNER = windows.WRITE_OWNER
|
||||
ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY
|
||||
)
|
||||
|
||||
// BackupHeader represents a backup stream of a file.
|
||||
type BackupHeader struct {
|
||||
//revive:disable-next-line:var-naming ID, not Id
|
||||
Id uint32 // The backup stream ID
|
||||
Attributes uint32 // Stream attributes
|
||||
Size int64 // The size of the stream in bytes
|
||||
@@ -49,8 +53,8 @@ type BackupHeader struct {
|
||||
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
|
||||
}
|
||||
|
||||
type win32StreamId struct {
|
||||
StreamId uint32
|
||||
type win32StreamID struct {
|
||||
StreamID uint32
|
||||
Attributes uint32
|
||||
Size uint64
|
||||
NameSize uint32
|
||||
@@ -71,7 +75,7 @@ func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
|
||||
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
|
||||
// it was not completely read.
|
||||
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
|
||||
if r.bytesLeft > 0 {
|
||||
if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this
|
||||
if s, ok := r.r.(io.Seeker); ok {
|
||||
// Make sure Seek on io.SeekCurrent sometimes succeeds
|
||||
// before trying the actual seek.
|
||||
@@ -82,16 +86,16 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
|
||||
r.bytesLeft = 0
|
||||
}
|
||||
}
|
||||
if _, err := io.Copy(ioutil.Discard, r); err != nil {
|
||||
if _, err := io.Copy(io.Discard, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var wsi win32StreamId
|
||||
var wsi win32StreamID
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr := &BackupHeader{
|
||||
Id: wsi.StreamId,
|
||||
Id: wsi.StreamID,
|
||||
Attributes: wsi.Attributes,
|
||||
Size: int64(wsi.Size),
|
||||
}
|
||||
@@ -102,7 +106,7 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
|
||||
}
|
||||
hdr.Name = syscall.UTF16ToString(name)
|
||||
}
|
||||
if wsi.StreamId == BackupSparseBlock {
|
||||
if wsi.StreamID == BackupSparseBlock {
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -147,8 +151,8 @@ func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
|
||||
return fmt.Errorf("missing %d bytes", w.bytesLeft)
|
||||
}
|
||||
name := utf16.Encode([]rune(hdr.Name))
|
||||
wsi := win32StreamId{
|
||||
StreamId: hdr.Id,
|
||||
wsi := win32StreamID{
|
||||
StreamID: hdr.Id,
|
||||
Attributes: hdr.Attributes,
|
||||
Size: uint64(hdr.Size),
|
||||
NameSize: uint32(len(name) * 2),
|
||||
@@ -203,7 +207,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
|
||||
var bytesRead uint32
|
||||
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupRead", r.f.Name(), err}
|
||||
return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(r.f)
|
||||
if bytesRead == 0 {
|
||||
@@ -216,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
|
||||
// the underlying file.
|
||||
func (r *BackupFileReader) Close() error {
|
||||
if r.ctx != 0 {
|
||||
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
|
||||
_ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
|
||||
runtime.KeepAlive(r.f)
|
||||
r.ctx = 0
|
||||
}
|
||||
@@ -242,7 +246,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
|
||||
var bytesWritten uint32
|
||||
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
|
||||
return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(w.f)
|
||||
if int(bytesWritten) != len(b) {
|
||||
@@ -255,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
|
||||
// close the underlying file.
|
||||
func (w *BackupFileWriter) Close() error {
|
||||
if w.ctx != 0 {
|
||||
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
|
||||
_ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
|
||||
runtime.KeepAlive(w.f)
|
||||
w.ctx = 0
|
||||
}
|
||||
@@ -271,7 +275,13 @@ func OpenForBackup(path string, access uint32, share uint32, createmode uint32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
|
||||
h, err := syscall.CreateFile(&winPath[0],
|
||||
access,
|
||||
share,
|
||||
nil,
|
||||
createmode,
|
||||
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT,
|
||||
0)
|
||||
if err != nil {
|
||||
err = &os.PathError{Op: "open", Path: path, Err: err}
|
||||
return nil, err
|
||||
|
||||
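Before the next hunk, a minimal sketch of how the backup.go APIs touched above fit together: open a file with backup semantics and walk its Win32 backup streams. `OpenForBackup`, `NewBackupStreamReader`, and `BackupHeader` appear in the hunks above; `NewBackupFileReader` and its `includeSecurity` parameter are assumed from the go-winio package and are not shown in this diff.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"syscall"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Open with FILE_FLAG_BACKUP_SEMANTICS so directories and reparse points work too.
	f, err := winio.OpenForBackup(`C:\temp\example.txt`, syscall.GENERIC_READ, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	bfr := winio.NewBackupFileReader(f, true) // assumed constructor: (file, includeSecurity)
	defer bfr.Close()

	// Walk every backup stream (data, security, EAs, alternate data streams, ...).
	br := winio.NewBackupStreamReader(bfr)
	for {
		hdr, err := br.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("stream id=%d name=%q size=%d\n", hdr.Id, hdr.Name, hdr.Size)
	}
}
```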
@@ -1,4 +1,3 @@
|
||||
// +build !windows
|
||||
// This file only exists to allow go get on non-Windows platforms.
|
||||
|
||||
package backuptar
|
||||
2 vendor/github.com/Microsoft/go-winio/backuptar/strconv.go (generated, vendored)
@@ -1,3 +1,5 @@
|
||||
//go:build windows
|
||||
|
||||
package backuptar
|
||||
|
||||
import (
|
||||
|
||||
146 vendor/github.com/Microsoft/go-winio/backuptar/tar.go (generated, vendored)
@@ -1,3 +1,4 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package backuptar
|
||||
@@ -7,7 +8,6 @@ import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -18,17 +18,18 @@ import (
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//nolint:deadcode,varcheck // keep unused constants for potential future use
|
||||
const (
|
||||
c_ISUID = 04000 // Set uid
|
||||
c_ISGID = 02000 // Set gid
|
||||
c_ISVTX = 01000 // Save text (sticky bit)
|
||||
c_ISDIR = 040000 // Directory
|
||||
c_ISFIFO = 010000 // FIFO
|
||||
c_ISREG = 0100000 // Regular file
|
||||
c_ISLNK = 0120000 // Symbolic link
|
||||
c_ISBLK = 060000 // Block special file
|
||||
c_ISCHR = 020000 // Character special file
|
||||
c_ISSOCK = 0140000 // Socket
|
||||
cISUID = 0004000 // Set uid
|
||||
cISGID = 0002000 // Set gid
|
||||
cISVTX = 0001000 // Save text (sticky bit)
|
||||
cISDIR = 0040000 // Directory
|
||||
cISFIFO = 0010000 // FIFO
|
||||
cISREG = 0100000 // Regular file
|
||||
cISLNK = 0120000 // Symbolic link
|
||||
cISBLK = 0060000 // Block special file
|
||||
cISCHR = 0020000 // Character special file
|
||||
cISSOCK = 0140000 // Socket
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -44,7 +45,7 @@ const (
|
||||
// zeroReader is an io.Reader that always returns 0s.
|
||||
type zeroReader struct{}
|
||||
|
||||
func (zr zeroReader) Read(b []byte) (int, error) {
|
||||
func (zeroReader) Read(b []byte) (int, error) {
|
||||
for i := range b {
|
||||
b[i] = 0
|
||||
}
|
||||
@@ -55,7 +56,7 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
|
||||
curOffset := int64(0)
|
||||
for {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
if err == io.EOF { //nolint:errorlint
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
@@ -71,8 +72,8 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
|
||||
}
|
||||
// archive/tar does not support writing sparse files
|
||||
// so just write zeroes to catch up to the current offset.
|
||||
if _, err := io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil {
|
||||
return fmt.Errorf("seek to offset %d: %s", bhdr.Offset, err)
|
||||
if _, err = io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil {
|
||||
return fmt.Errorf("seek to offset %d: %w", bhdr.Offset, err)
|
||||
}
|
||||
if bhdr.Size == 0 {
|
||||
// A sparse block with size = 0 is used to mark the end of the sparse blocks.
|
||||
@@ -106,7 +107,7 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta
|
||||
hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds()))
|
||||
|
||||
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
|
||||
hdr.Mode |= c_ISDIR
|
||||
hdr.Mode |= cISDIR
|
||||
hdr.Size = 0
|
||||
hdr.Typeflag = tar.TypeDir
|
||||
}
|
||||
@@ -116,32 +117,29 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta
|
||||
// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file
|
||||
// from the tar header and returns the security descriptor into a byte slice.
|
||||
func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) {
|
||||
// Maintaining old SDDL-based behavior for backward
|
||||
// compatibility. All new tar headers written by this library
|
||||
// will have raw binary for the security descriptor.
|
||||
var sd []byte
|
||||
var err error
|
||||
if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
|
||||
sd, err = winio.SddlToSecurityDescriptor(sddl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
|
||||
sd, err = base64.StdEncoding.DecodeString(sdraw)
|
||||
sd, err := base64.StdEncoding.DecodeString(sdraw)
|
||||
if err != nil {
|
||||
// Not returning sd as-is in the error-case, as base64.DecodeString
|
||||
// may return partially decoded data (not nil or empty slice) in case
|
||||
// of a failure: https://github.com/golang/go/blob/go1.17.7/src/encoding/base64/base64.go#L382-L387
|
||||
return nil, err
|
||||
}
|
||||
return sd, nil
|
||||
}
|
||||
return sd, nil
|
||||
// Maintaining old SDDL-based behavior for backward compatibility. All new
|
||||
// tar headers written by this library will have raw binary for the security
|
||||
// descriptor.
|
||||
if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
|
||||
return winio.SddlToSecurityDescriptor(sddl)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the
|
||||
// current file from the tar header and returns it as a byte slice.
|
||||
func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) {
|
||||
var eas []winio.ExtendedAttribute
|
||||
var eadata []byte
|
||||
var err error
|
||||
var eas []winio.ExtendedAttribute //nolint:prealloc // len(eas) <= len(hdr.PAXRecords); prealloc is wasteful
|
||||
for k, v := range hdr.PAXRecords {
|
||||
if !strings.HasPrefix(k, hdrEaPrefix) {
|
||||
continue
|
||||
@@ -155,13 +153,15 @@ func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) {
|
||||
Value: data,
|
||||
})
|
||||
}
|
||||
var eaData []byte
|
||||
var err error
|
||||
if len(eas) != 0 {
|
||||
eadata, err = winio.EncodeExtendedAttributes(eas)
|
||||
eaData, err = winio.EncodeExtendedAttributes(eas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return eadata, nil
|
||||
return eaData, nil
|
||||
}
|
||||
|
||||
// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header
|
||||
@@ -182,11 +182,9 @@ func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte {
|
||||
//
|
||||
// The additional Win32 metadata is:
|
||||
//
|
||||
// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
|
||||
//
|
||||
// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
|
||||
//
|
||||
// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
|
||||
// - MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
|
||||
// - MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
|
||||
// - MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
|
||||
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
|
||||
name = filepath.ToSlash(name)
|
||||
hdr := BasicInfoHeader(name, size, fileInfo)
|
||||
@@ -209,7 +207,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
|
||||
var dataHdr *winio.BackupHeader
|
||||
for dataHdr == nil {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
if err == io.EOF { //nolint:errorlint
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
@@ -217,21 +215,21 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
|
||||
}
|
||||
switch bhdr.Id {
|
||||
case winio.BackupData:
|
||||
hdr.Mode |= c_ISREG
|
||||
hdr.Mode |= cISREG
|
||||
if !readTwice {
|
||||
dataHdr = bhdr
|
||||
}
|
||||
case winio.BackupSecurity:
|
||||
sd, err := ioutil.ReadAll(br)
|
||||
sd, err := io.ReadAll(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
|
||||
|
||||
case winio.BackupReparseData:
|
||||
hdr.Mode |= c_ISLNK
|
||||
hdr.Mode |= cISLNK
|
||||
hdr.Typeflag = tar.TypeSymlink
|
||||
reparseBuffer, err := ioutil.ReadAll(br)
|
||||
reparseBuffer, _ := io.ReadAll(br)
|
||||
rp, err := winio.DecodeReparsePoint(reparseBuffer)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -242,7 +240,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
|
||||
hdr.Linkname = rp.Target
|
||||
|
||||
case winio.BackupEaData:
|
||||
eab, err := ioutil.ReadAll(br)
|
||||
eab, err := io.ReadAll(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -276,7 +274,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
|
||||
}
|
||||
for dataHdr == nil {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
if err == io.EOF { //nolint:errorlint
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
@@ -311,7 +309,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
|
||||
// range of the file containing the range contents. Finally there is a sparse block stream with
|
||||
// size = 0 and offset = <file size>.
|
||||
|
||||
if dataHdr != nil {
|
||||
if dataHdr != nil { //nolint:nestif // todo: reduce nesting complexity
|
||||
// A data stream was found. Copy the data.
|
||||
// We assume that we will either have a data stream size > 0 XOR have sparse block streams.
|
||||
if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 {
|
||||
@@ -319,13 +317,13 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
|
||||
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
|
||||
}
|
||||
if _, err = io.Copy(t, br); err != nil {
|
||||
return fmt.Errorf("%s: copying contents from data stream: %s", name, err)
|
||||
return fmt.Errorf("%s: copying contents from data stream: %w", name, err)
|
||||
}
|
||||
} else if size > 0 {
|
||||
// As of a recent OS change, BackupRead now returns a data stream for empty sparse files.
|
||||
// These files have no sparse block streams, so skip the copySparse call if file size = 0.
|
||||
if err = copySparse(t, br); err != nil {
|
||||
return fmt.Errorf("%s: copying contents from sparse block stream: %s", name, err)
|
||||
return fmt.Errorf("%s: copying contents from sparse block stream: %w", name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -335,7 +333,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
|
||||
// been written. In practice, this means that we don't get EA or TXF metadata.
|
||||
for {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
if err == io.EOF { //nolint:errorlint
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
@@ -343,35 +341,30 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
|
||||
}
|
||||
switch bhdr.Id {
|
||||
case winio.BackupAlternateData:
|
||||
altName := bhdr.Name
|
||||
if strings.HasSuffix(altName, ":$DATA") {
|
||||
altName = altName[:len(altName)-len(":$DATA")]
|
||||
}
|
||||
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
|
||||
hdr = &tar.Header{
|
||||
Format: hdr.Format,
|
||||
Name: name + altName,
|
||||
Mode: hdr.Mode,
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: bhdr.Size,
|
||||
ModTime: hdr.ModTime,
|
||||
AccessTime: hdr.AccessTime,
|
||||
ChangeTime: hdr.ChangeTime,
|
||||
}
|
||||
err = t.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
} else {
|
||||
if (bhdr.Attributes & winio.StreamSparseAttributes) != 0 {
|
||||
// Unsupported for now, since the size of the alternate stream is not present
|
||||
// in the backup stream until after the data has been read.
|
||||
return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name)
|
||||
}
|
||||
altName := strings.TrimSuffix(bhdr.Name, ":$DATA")
|
||||
hdr = &tar.Header{
|
||||
Format: hdr.Format,
|
||||
Name: name + altName,
|
||||
Mode: hdr.Mode,
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: bhdr.Size,
|
||||
ModTime: hdr.ModTime,
|
||||
AccessTime: hdr.AccessTime,
|
||||
ChangeTime: hdr.ChangeTime,
|
||||
}
|
||||
err = t.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
|
||||
// ignore these streams
|
||||
default:
|
||||
@@ -413,7 +406,7 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
|
||||
}
|
||||
fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano())
|
||||
}
|
||||
return
|
||||
return name, size, fileInfo, err
|
||||
}
|
||||
|
||||
// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
|
||||
@@ -474,7 +467,6 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
|
||||
|
||||
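As a rough usage sketch of the backuptar changes above: turn a file's backup stream into a tar entry. `WriteTarFileFromBackupStream`, `GetFileBasicInfo`, and `OpenForBackup` all appear in this diff; `NewBackupFileReader` and the exact open flags are assumptions about the go-winio package.

```go
package main

import (
	"archive/tar"
	"log"
	"os"
	"syscall"

	winio "github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/backuptar"
)

func main() {
	const path = `C:\temp\example.txt`
	f, err := winio.OpenForBackup(path, syscall.GENERIC_READ, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	fi, err := winio.GetFileBasicInfo(f) // times and attributes for the tar header
	if err != nil {
		log.Fatal(err)
	}
	st, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	bfr := winio.NewBackupFileReader(f, true) // assumed constructor: (file, includeSecurity)
	defer bfr.Close()

	tw := tar.NewWriter(os.Stdout)
	defer tw.Close()
	// Writes the tar header (with MSWINDOWS.* PAX records) followed by the file data.
	if err := backuptar.WriteTarFileFromBackupStream(tw, bfr, "example.txt", st.Size(), fi); err != nil {
		log.Fatal(err)
	}
}
```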
22 vendor/github.com/Microsoft/go-winio/doc.go (generated, vendored, new file)
@@ -0,0 +1,22 @@
// This package provides utilities for efficiently performing Win32 IO operations in Go.
// Currently, this package is provides support for genreal IO and management of
//   - named pipes
//   - files
//   - [Hyper-V sockets]
//
// This code is similar to Go's [net] package, and uses IO completion ports to avoid
// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
//
// This limits support to Windows Vista and newer operating systems.
//
// Additionally, this package provides support for:
//   - creating and managing GUIDs
//   - writing to [ETW]
//   - opening and manageing VHDs
//   - parsing [Windows Image files]
//   - auto-generating Win32 API code
//
// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
package winio
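The new package doc above mentions named pipes as one of the supported IO surfaces. For illustration only, a hedged sketch of that use case follows; neither `ListenPipe` nor `DialPipe` appears in this diff, and their signatures are assumed from the go-winio package.

```go
package main

import (
	"log"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	const pipe = `\\.\pipe\winio-demo`

	// Server side: accept one connection and send a greeting.
	l, err := winio.ListenPipe(pipe, nil) // assumed signature: (path string, c *PipeConfig)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	go func() {
		c, err := l.Accept()
		if err != nil {
			return
		}
		defer c.Close()
		c.Write([]byte("hello from the pipe server\n"))
	}()

	// Client side: dial with a timeout.
	timeout := 2 * time.Second
	conn, err := winio.DialPipe(pipe, &timeout) // assumed signature: (path string, timeout *time.Duration)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	buf := make([]byte, 64)
	n, _ := conn.Read(buf)
	log.Printf("got: %s", buf[:n])
}
```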
8 vendor/github.com/Microsoft/go-winio/ea.go (generated, vendored)
@@ -33,7 +33,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
|
||||
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
|
||||
if err != nil {
|
||||
err = errInvalidEaBuffer
|
||||
return
|
||||
return ea, nb, err
|
||||
}
|
||||
|
||||
nameOffset := fileFullEaInformationSize
|
||||
@@ -43,7 +43,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
|
||||
nextOffset := int(info.NextEntryOffset)
|
||||
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
|
||||
err = errInvalidEaBuffer
|
||||
return
|
||||
return ea, nb, err
|
||||
}
|
||||
|
||||
ea.Name = string(b[nameOffset : nameOffset+nameLen])
|
||||
@@ -52,7 +52,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
|
||||
if info.NextEntryOffset != 0 {
|
||||
nb = b[info.NextEntryOffset:]
|
||||
}
|
||||
return
|
||||
return ea, nb, err
|
||||
}
|
||||
|
||||
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
|
||||
@@ -67,7 +67,7 @@ func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
|
||||
eas = append(eas, ea)
|
||||
b = nb
|
||||
}
|
||||
return
|
||||
return eas, err
|
||||
}
|
||||
|
||||
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
|
||||
|
||||
66 vendor/github.com/Microsoft/go-winio/file.go (generated, vendored)
@@ -11,6 +11,8 @@ import (
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
|
||||
@@ -24,6 +26,8 @@ type atomicBool int32
|
||||
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
|
||||
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
|
||||
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
|
||||
|
||||
//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg
|
||||
func (b *atomicBool) swap(new bool) bool {
|
||||
var newInt int32
|
||||
if new {
|
||||
@@ -32,11 +36,6 @@ func (b *atomicBool) swap(new bool) bool {
|
||||
return atomic.SwapInt32((*int32)(b), newInt) == 1
|
||||
}
|
||||
|
||||
const (
|
||||
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
|
||||
cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
|
||||
)
|
||||
|
||||
var (
|
||||
ErrFileClosed = errors.New("file has already been closed")
|
||||
ErrTimeout = &timeoutError{}
|
||||
@@ -44,28 +43,28 @@ var (
|
||||
|
||||
type timeoutError struct{}
|
||||
|
||||
func (e *timeoutError) Error() string { return "i/o timeout" }
|
||||
func (e *timeoutError) Timeout() bool { return true }
|
||||
func (e *timeoutError) Temporary() bool { return true }
|
||||
func (*timeoutError) Error() string { return "i/o timeout" }
|
||||
func (*timeoutError) Timeout() bool { return true }
|
||||
func (*timeoutError) Temporary() bool { return true }
|
||||
|
||||
type timeoutChan chan struct{}
|
||||
|
||||
var ioInitOnce sync.Once
|
||||
var ioCompletionPort syscall.Handle
|
||||
|
||||
// ioResult contains the result of an asynchronous IO operation
|
||||
// ioResult contains the result of an asynchronous IO operation.
|
||||
type ioResult struct {
|
||||
bytes uint32
|
||||
err error
|
||||
}
|
||||
|
||||
// ioOperation represents an outstanding asynchronous Win32 IO
|
||||
// ioOperation represents an outstanding asynchronous Win32 IO.
|
||||
type ioOperation struct {
|
||||
o syscall.Overlapped
|
||||
ch chan ioResult
|
||||
}
|
||||
|
||||
func initIo() {
|
||||
func initIO() {
|
||||
h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -94,15 +93,15 @@ type deadlineHandler struct {
|
||||
timedout atomicBool
|
||||
}
|
||||
|
||||
// makeWin32File makes a new win32File from an existing file handle
|
||||
// makeWin32File makes a new win32File from an existing file handle.
|
||||
func makeWin32File(h syscall.Handle) (*win32File, error) {
|
||||
f := &win32File{handle: h}
|
||||
ioInitOnce.Do(initIo)
|
||||
ioInitOnce.Do(initIO)
|
||||
_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
|
||||
err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -121,14 +120,14 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// closeHandle closes the resources associated with a Win32 handle
|
||||
// closeHandle closes the resources associated with a Win32 handle.
|
||||
func (f *win32File) closeHandle() {
|
||||
f.wgLock.Lock()
|
||||
// Atomically set that we are closing, releasing the resources only once.
|
||||
if !f.closing.swap(true) {
|
||||
f.wgLock.Unlock()
|
||||
// cancel all IO and wait for it to complete
|
||||
cancelIoEx(f.handle, nil)
|
||||
_ = cancelIoEx(f.handle, nil)
|
||||
f.wg.Wait()
|
||||
// at this point, no new IO can start
|
||||
syscall.Close(f.handle)
|
||||
@@ -144,14 +143,14 @@ func (f *win32File) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsClosed checks if the file has been closed
|
||||
// IsClosed checks if the file has been closed.
|
||||
func (f *win32File) IsClosed() bool {
|
||||
return f.closing.isSet()
|
||||
}
|
||||
|
||||
// prepareIo prepares for a new IO operation.
|
||||
// prepareIO prepares for a new IO operation.
|
||||
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
|
||||
func (f *win32File) prepareIo() (*ioOperation, error) {
|
||||
func (f *win32File) prepareIO() (*ioOperation, error) {
|
||||
f.wgLock.RLock()
|
||||
if f.closing.isSet() {
|
||||
f.wgLock.RUnlock()
|
||||
@@ -164,7 +163,7 @@ func (f *win32File) prepareIo() (*ioOperation, error) {
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// ioCompletionProcessor processes completed async IOs forever
|
||||
// ioCompletionProcessor processes completed async IOs forever.
|
||||
func ioCompletionProcessor(h syscall.Handle) {
|
||||
for {
|
||||
var bytes uint32
|
||||
@@ -178,15 +177,17 @@ func ioCompletionProcessor(h syscall.Handle) {
|
||||
}
|
||||
}
|
||||
|
||||
// asyncIo processes the return value from ReadFile or WriteFile, blocking until
|
||||
// todo: helsaawy - create an asyncIO version that takes a context
|
||||
|
||||
// asyncIO processes the return value from ReadFile or WriteFile, blocking until
|
||||
// the operation has actually completed.
|
||||
func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
|
||||
if err != syscall.ERROR_IO_PENDING {
|
||||
func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
|
||||
if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
|
||||
return int(bytes), err
|
||||
}
|
||||
|
||||
if f.closing.isSet() {
|
||||
cancelIoEx(f.handle, &c.o)
|
||||
_ = cancelIoEx(f.handle, &c.o)
|
||||
}
|
||||
|
||||
var timeout timeoutChan
|
||||
@@ -200,7 +201,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
|
||||
select {
|
||||
case r = <-c.ch:
|
||||
err = r.err
|
||||
if err == syscall.ERROR_OPERATION_ABORTED {
|
||||
if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
|
||||
if f.closing.isSet() {
|
||||
err = ErrFileClosed
|
||||
}
|
||||
@@ -210,10 +211,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
|
||||
err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
|
||||
}
|
||||
case <-timeout:
|
||||
cancelIoEx(f.handle, &c.o)
|
||||
_ = cancelIoEx(f.handle, &c.o)
|
||||
r = <-c.ch
|
||||
err = r.err
|
||||
if err == syscall.ERROR_OPERATION_ABORTED {
|
||||
if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
|
||||
err = ErrTimeout
|
||||
}
|
||||
}
|
||||
@@ -221,13 +222,14 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
|
||||
// runtime.KeepAlive is needed, as c is passed via native
|
||||
// code to ioCompletionProcessor, c must remain alive
|
||||
// until the channel read is complete.
|
||||
// todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive?
|
||||
runtime.KeepAlive(c)
|
||||
return int(r.bytes), err
|
||||
}
|
||||
|
||||
// Read reads from a file handle.
|
||||
func (f *win32File) Read(b []byte) (int, error) {
|
||||
c, err := f.prepareIo()
|
||||
c, err := f.prepareIO()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -239,13 +241,13 @@ func (f *win32File) Read(b []byte) (int, error) {
|
||||
|
||||
var bytes uint32
|
||||
err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
|
||||
n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
|
||||
n, err := f.asyncIO(c, &f.readDeadline, bytes, err)
|
||||
runtime.KeepAlive(b)
|
||||
|
||||
// Handle EOF conditions.
|
||||
if err == nil && n == 0 && len(b) != 0 {
|
||||
return 0, io.EOF
|
||||
} else if err == syscall.ERROR_BROKEN_PIPE {
|
||||
} else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
|
||||
return 0, io.EOF
|
||||
} else {
|
||||
return n, err
|
||||
@@ -254,7 +256,7 @@ func (f *win32File) Read(b []byte) (int, error) {
|
||||
|
||||
// Write writes to a file handle.
|
||||
func (f *win32File) Write(b []byte) (int, error) {
|
||||
c, err := f.prepareIo()
|
||||
c, err := f.prepareIO()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -266,7 +268,7 @@ func (f *win32File) Write(b []byte) (int, error) {
|
||||
|
||||
var bytes uint32
|
||||
err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
|
||||
n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
|
||||
n, err := f.asyncIO(c, &f.writeDeadline, bytes, err)
|
||||
runtime.KeepAlive(b)
|
||||
return n, err
|
||||
}
|
||||
|
||||
29 vendor/github.com/Microsoft/go-winio/fileinfo.go (generated, vendored)
@@ -1,3 +1,4 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
@@ -14,13 +15,18 @@ import (
|
||||
type FileBasicInfo struct {
|
||||
CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
|
||||
FileAttributes uint32
|
||||
pad uint32 // padding
|
||||
_ uint32 // padding
|
||||
}
|
||||
|
||||
// GetFileBasicInfo retrieves times and attributes for a file.
|
||||
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
|
||||
bi := &FileBasicInfo{}
|
||||
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
if err := windows.GetFileInformationByHandleEx(
|
||||
windows.Handle(f.Fd()),
|
||||
windows.FileBasicInfo,
|
||||
(*byte)(unsafe.Pointer(bi)),
|
||||
uint32(unsafe.Sizeof(*bi)),
|
||||
); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
@@ -29,7 +35,12 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
|
||||
|
||||
// SetFileBasicInfo sets times and attributes for a file.
|
||||
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
|
||||
if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
if err := windows.SetFileInformationByHandle(
|
||||
windows.Handle(f.Fd()),
|
||||
windows.FileBasicInfo,
|
||||
(*byte)(unsafe.Pointer(bi)),
|
||||
uint32(unsafe.Sizeof(*bi)),
|
||||
); err != nil {
|
||||
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
@@ -48,7 +59,10 @@ type FileStandardInfo struct {
|
||||
// GetFileStandardInfo retrieves ended information for the file.
|
||||
func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
|
||||
si := &FileStandardInfo{}
|
||||
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil {
|
||||
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()),
|
||||
windows.FileStandardInfo,
|
||||
(*byte)(unsafe.Pointer(si)),
|
||||
uint32(unsafe.Sizeof(*si))); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
@@ -65,7 +79,12 @@ type FileIDInfo struct {
|
||||
// GetFileID retrieves the unique (volume, file ID) pair for a file.
|
||||
func GetFileID(f *os.File) (*FileIDInfo, error) {
|
||||
fileID := &FileIDInfo{}
|
||||
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
|
||||
if err := windows.GetFileInformationByHandleEx(
|
||||
windows.Handle(f.Fd()),
|
||||
windows.FileIdInfo,
|
||||
(*byte)(unsafe.Pointer(fileID)),
|
||||
uint32(unsafe.Sizeof(*fileID)),
|
||||
); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
|
||||
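A small sketch of the fileinfo helpers reformatted above: read a file's basic info, flip an attribute, and write it back. `GetFileBasicInfo`, `SetFileBasicInfo`, and `GetFileID` appear in the hunks above; the path is arbitrary, and the file must be opened with write access for the attribute update to succeed.

```go
package main

import (
	"log"
	"os"

	winio "github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func main() {
	// O_RDWR so the handle has enough access for SetFileInformationByHandle.
	f, err := os.OpenFile(`C:\temp\example.txt`, os.O_RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	bi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		log.Fatal(err)
	}
	bi.FileAttributes |= windows.FILE_ATTRIBUTE_READONLY
	if err := winio.SetFileBasicInfo(f, bi); err != nil {
		log.Fatal(err)
	}

	// GetFileID pairs the volume serial number with the 128-bit file ID.
	id, err := winio.GetFileID(f)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("file ID info: %+v", id)
}
```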
345 vendor/github.com/Microsoft/go-winio/hvsock.go (generated, vendored)
@@ -4,6 +4,8 @@
|
||||
package winio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
@@ -12,16 +14,87 @@ import (
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
|
||||
"github.com/Microsoft/go-winio/internal/socket"
|
||||
"github.com/Microsoft/go-winio/pkg/guid"
|
||||
)
|
||||
|
||||
//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
|
||||
const afHVSock = 34 // AF_HYPERV
|
||||
|
||||
const (
|
||||
afHvSock = 34 // AF_HYPERV
|
||||
// Well known Service and VM IDs
|
||||
//https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
|
||||
|
||||
socketError = ^uintptr(0)
|
||||
)
|
||||
// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
|
||||
func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
|
||||
return guid.GUID{}
|
||||
}
|
||||
|
||||
// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
|
||||
func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff
|
||||
return guid.GUID{
|
||||
Data1: 0xffffffff,
|
||||
Data2: 0xffff,
|
||||
Data3: 0xffff,
|
||||
Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
|
||||
}
|
||||
}
|
||||
|
||||
// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector.
|
||||
func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838
|
||||
return guid.GUID{
|
||||
Data1: 0xe0e16197,
|
||||
Data2: 0xdd56,
|
||||
Data3: 0x4a10,
|
||||
Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38},
|
||||
}
|
||||
}
|
||||
|
||||
// HvsockGUIDSiloHost is the address of a silo's host partition:
|
||||
// - The silo host of a hosted silo is the utility VM.
|
||||
// - The silo host of a silo on a physical host is the physical host.
|
||||
func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568
|
||||
return guid.GUID{
|
||||
Data1: 0x36bd0c5c,
|
||||
Data2: 0x7276,
|
||||
Data3: 0x4223,
|
||||
Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68},
|
||||
}
|
||||
}
|
||||
|
||||
// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions.
|
||||
func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd
|
||||
return guid.GUID{
|
||||
Data1: 0x90db8b89,
|
||||
Data2: 0xd35,
|
||||
Data3: 0x4f79,
|
||||
Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd},
|
||||
}
|
||||
}
|
||||
|
||||
// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition.
|
||||
// Listening on this VmId accepts connection from:
|
||||
// - Inside silos: silo host partition.
|
||||
// - Inside hosted silo: host of the VM.
|
||||
// - Inside VM: VM host.
|
||||
// - Physical host: Not supported.
|
||||
func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878
|
||||
return guid.GUID{
|
||||
Data1: 0xa42e7cda,
|
||||
Data2: 0xd03f,
|
||||
Data3: 0x480c,
|
||||
Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78},
|
||||
}
|
||||
}
|
||||
|
||||
// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol.
|
||||
func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3
|
||||
return guid.GUID{
|
||||
Data2: 0xfacb,
|
||||
Data3: 0x11e6,
|
||||
Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3},
|
||||
}
|
||||
}
|
||||
|
||||
// An HvsockAddr is an address for a AF_HYPERV socket.
|
||||
type HvsockAddr struct {
|
||||
@@ -36,8 +109,10 @@ type rawHvsockAddr struct {
|
||||
ServiceID guid.GUID
|
||||
}
|
||||
|
||||
var _ socket.RawSockaddr = &rawHvsockAddr{}
|
||||
|
||||
// Network returns the address's network name, "hvsock".
|
||||
func (addr *HvsockAddr) Network() string {
|
||||
func (*HvsockAddr) Network() string {
|
||||
return "hvsock"
|
||||
}
|
||||
|
||||
@@ -47,14 +122,14 @@ func (addr *HvsockAddr) String() string {
|
||||
|
||||
// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
|
||||
func VsockServiceID(port uint32) guid.GUID {
|
||||
g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
|
||||
g := hvsockVsockServiceTemplate() // make a copy
|
||||
g.Data1 = port
|
||||
return g
|
||||
}
|
||||
|
||||
func (addr *HvsockAddr) raw() rawHvsockAddr {
|
||||
return rawHvsockAddr{
|
||||
Family: afHvSock,
|
||||
Family: afHVSock,
|
||||
VMID: addr.VMID,
|
||||
ServiceID: addr.ServiceID,
|
||||
}
|
||||
@@ -65,20 +140,48 @@ func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
|
||||
addr.ServiceID = raw.ServiceID
|
||||
}
|
||||
|
||||
// Sockaddr returns a pointer to and the size of this struct.
|
||||
//
|
||||
// Implements the [socket.RawSockaddr] interface, and allows use in
|
||||
// [socket.Bind] and [socket.ConnectEx].
|
||||
func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) {
|
||||
return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil
|
||||
}
|
||||
|
||||
// Sockaddr interface allows use with `sockets.Bind()` and `.ConnectEx()`.
|
||||
func (r *rawHvsockAddr) FromBytes(b []byte) error {
|
||||
n := int(unsafe.Sizeof(rawHvsockAddr{}))
|
||||
|
||||
if len(b) < n {
|
||||
return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize)
|
||||
}
|
||||
|
||||
copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n])
|
||||
if r.Family != afHVSock {
|
||||
return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// HvsockListener is a socket listener for the AF_HYPERV address family.
|
||||
type HvsockListener struct {
|
||||
sock *win32File
|
||||
addr HvsockAddr
|
||||
}
|
||||
|
||||
var _ net.Listener = &HvsockListener{}
|
||||
|
||||
// HvsockConn is a connected socket of the AF_HYPERV address family.
|
||||
type HvsockConn struct {
|
||||
sock *win32File
|
||||
local, remote HvsockAddr
|
||||
}
|
||||
|
||||
func newHvSocket() (*win32File, error) {
|
||||
fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
|
||||
var _ net.Conn = &HvsockConn{}
|
||||
|
||||
func newHVSocket() (*win32File, error) {
|
||||
fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("socket", err)
|
||||
}
|
||||
@@ -94,12 +197,12 @@ func newHvSocket() (*win32File, error) {
|
||||
// ListenHvsock listens for connections on the specified hvsock address.
|
||||
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
|
||||
l := &HvsockListener{addr: *addr}
|
||||
sock, err := newHvSocket()
|
||||
sock, err := newHVSocket()
|
||||
if err != nil {
|
||||
return nil, l.opErr("listen", err)
|
||||
}
|
||||
sa := addr.raw()
|
||||
err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa)))
|
||||
err = socket.Bind(windows.Handle(sock.handle), &sa)
|
||||
if err != nil {
|
||||
return nil, l.opErr("listen", os.NewSyscallError("socket", err))
|
||||
}
|
||||
@@ -121,7 +224,7 @@ func (l *HvsockListener) Addr() net.Addr {
|
||||
|
||||
// Accept waits for the next connection and returns it.
|
||||
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
|
||||
sock, err := newHvSocket()
|
||||
sock, err := newHVSocket()
|
||||
if err != nil {
|
||||
return nil, l.opErr("accept", err)
|
||||
}
|
||||
@@ -130,27 +233,42 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) {
|
||||
sock.Close()
|
||||
}
|
||||
}()
|
||||
c, err := l.sock.prepareIo()
|
||||
c, err := l.sock.prepareIO()
|
||||
if err != nil {
|
||||
return nil, l.opErr("accept", err)
|
||||
}
|
||||
defer l.sock.wg.Done()
|
||||
|
||||
// AcceptEx, per documentation, requires an extra 16 bytes per address.
|
||||
//
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex
|
||||
const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
|
||||
var addrbuf [addrlen * 2]byte
|
||||
|
||||
var bytes uint32
|
||||
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
|
||||
_, err = l.sock.asyncIo(c, nil, bytes, err)
|
||||
if err != nil {
|
||||
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o)
|
||||
if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
|
||||
return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
|
||||
}
|
||||
|
||||
conn := &HvsockConn{
|
||||
sock: sock,
|
||||
}
|
||||
// The local address returned in the AcceptEx buffer is the same as the Listener socket's
|
||||
// address. However, the service GUID reported by GetSockName is different from the Listeners
|
||||
// socket, and is sometimes the same as the local address of the socket that dialed the
|
||||
// address, with the service GUID.Data1 incremented, but othertimes is different.
|
||||
// todo: does the local address matter? is the listener's address or the actual address appropriate?
|
||||
conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
|
||||
conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
|
||||
|
||||
// initialize the accepted socket and update its properties with those of the listening socket
|
||||
if err = windows.Setsockopt(windows.Handle(sock.handle),
|
||||
windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT,
|
||||
(*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil {
|
||||
return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err))
|
||||
}
|
||||
|
||||
sock = nil
|
||||
return conn, nil
|
||||
}
|
||||
@@ -160,43 +278,171 @@ func (l *HvsockListener) Close() error {
|
||||
return l.sock.Close()
|
||||
}
|
||||
|
||||
/* Need to finish ConnectEx handling
|
||||
func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
|
||||
sock, err := newHvSocket()
|
||||
// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]).
|
||||
type HvsockDialer struct {
|
||||
// Deadline is the time the Dial operation must connect before erroring.
|
||||
Deadline time.Time
|
||||
|
||||
// Retries is the number of additional connects to try if the connection times out, is refused,
|
||||
// or the host is unreachable
|
||||
Retries uint
|
||||
|
||||
// RetryWait is the time to wait after a connection error to retry
|
||||
RetryWait time.Duration
|
||||
|
||||
rt *time.Timer // redial wait timer
|
||||
}
|
||||
|
||||
// Dial the Hyper-V socket at addr.
|
||||
//
|
||||
// See [HvsockDialer.Dial] for more information.
|
||||
func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
|
||||
return (&HvsockDialer{}).Dial(ctx, addr)
|
||||
}
|
||||
|
||||
// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful.
|
||||
// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between
|
||||
// retries.
|
||||
//
|
||||
// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx.
|
||||
func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
|
||||
op := "dial"
|
||||
// create the conn early to use opErr()
|
||||
conn = &HvsockConn{
|
||||
remote: *addr,
|
||||
}
|
||||
|
||||
if !d.Deadline.IsZero() {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithDeadline(ctx, d.Deadline)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// preemptive timeout/cancellation check
|
||||
if err = ctx.Err(); err != nil {
|
||||
return nil, conn.opErr(op, err)
|
||||
}
|
||||
|
||||
sock, err := newHVSocket()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, conn.opErr(op, err)
|
||||
}
|
||||
defer func() {
|
||||
if sock != nil {
|
||||
sock.Close()
|
||||
}
|
||||
}()
|
||||
c, err := sock.prepareIo()
|
||||
|
||||
sa := addr.raw()
|
||||
err = socket.Bind(windows.Handle(sock.handle), &sa)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, conn.opErr(op, os.NewSyscallError("bind", err))
|
||||
}
|
||||
|
||||
c, err := sock.prepareIO()
|
||||
if err != nil {
|
||||
return nil, conn.opErr(op, err)
|
||||
}
|
||||
defer sock.wg.Done()
|
||||
var bytes uint32
|
||||
err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
|
||||
_, err = sock.asyncIo(ctx, c, nil, bytes, err)
|
||||
for i := uint(0); i <= d.Retries; i++ {
|
||||
err = socket.ConnectEx(
|
||||
windows.Handle(sock.handle),
|
||||
&sa,
|
||||
nil, // sendBuf
|
||||
0, // sendDataLen
|
||||
&bytes,
|
||||
(*windows.Overlapped)(unsafe.Pointer(&c.o)))
|
||||
_, err = sock.asyncIO(c, nil, bytes, err)
|
||||
if i < d.Retries && canRedial(err) {
|
||||
if err = d.redialWait(ctx); err == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, conn.opErr(op, os.NewSyscallError("connectex", err))
|
||||
}
|
||||
conn := &HvsockConn{
|
||||
sock: sock,
|
||||
remote: *addr,
|
||||
|
||||
// update the connection properties, so shutdown can be used
|
||||
if err = windows.Setsockopt(
|
||||
windows.Handle(sock.handle),
|
||||
windows.SOL_SOCKET,
|
||||
windows.SO_UPDATE_CONNECT_CONTEXT,
|
||||
nil, // optvalue
|
||||
0, // optlen
|
||||
); err != nil {
|
||||
return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err))
|
||||
}
|
||||
|
||||
// get the local name
|
||||
var sal rawHvsockAddr
|
||||
err = socket.GetSockName(windows.Handle(sock.handle), &sal)
|
||||
if err != nil {
|
||||
return nil, conn.opErr(op, os.NewSyscallError("getsockname", err))
|
||||
}
|
||||
conn.local.fromRaw(&sal)
|
||||
|
||||
// one last check for timeout, since asyncIO doesn't check the context
|
||||
if err = ctx.Err(); err != nil {
|
||||
return nil, conn.opErr(op, err)
|
||||
}
|
||||
|
||||
conn.sock = sock
|
||||
sock = nil
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
*/
|
||||
|
||||
// redialWait waits before attempting to redial, resetting the timer as appropriate.
|
||||
func (d *HvsockDialer) redialWait(ctx context.Context) (err error) {
|
||||
if d.RetryWait == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if d.rt == nil {
|
||||
d.rt = time.NewTimer(d.RetryWait)
|
||||
} else {
|
||||
// should already be stopped and drained
|
||||
d.rt.Reset(d.RetryWait)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-d.rt.C:
|
||||
return nil
|
||||
}
|
||||
|
||||
// stop and drain the timer
|
||||
if !d.rt.Stop() {
|
||||
<-d.rt.C
|
||||
}
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall.
|
||||
func canRedial(err error) bool {
|
||||
//nolint:errorlint // guaranteed to be an Errno
|
||||
switch err {
|
||||
case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT,
|
||||
windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (conn *HvsockConn) opErr(op string, err error) error {
|
||||
// translate from "file closed" to "socket closed"
|
||||
if errors.Is(err, ErrFileClosed) {
|
||||
err = socket.ErrSocketClosed
|
||||
}
|
||||
return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
|
||||
}
|
||||
|
||||
func (conn *HvsockConn) Read(b []byte) (int, error) {
|
||||
c, err := conn.sock.prepareIo()
|
||||
c, err := conn.sock.prepareIO()
|
||||
if err != nil {
|
||||
return 0, conn.opErr("read", err)
|
||||
}
|
||||
@@ -204,10 +450,11 @@ func (conn *HvsockConn) Read(b []byte) (int, error) {
|
||||
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
|
||||
var flags, bytes uint32
|
||||
err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
|
||||
n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
|
||||
n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err)
|
||||
if err != nil {
|
||||
if _, ok := err.(syscall.Errno); ok {
|
||||
err = os.NewSyscallError("wsarecv", err)
|
||||
var eno windows.Errno
|
||||
if errors.As(err, &eno) {
|
||||
err = os.NewSyscallError("wsarecv", eno)
|
||||
}
|
||||
return 0, conn.opErr("read", err)
|
||||
} else if n == 0 {
|
||||
@@ -230,7 +477,7 @@ func (conn *HvsockConn) Write(b []byte) (int, error) {
|
||||
}
|
||||
|
||||
func (conn *HvsockConn) write(b []byte) (int, error) {
|
||||
c, err := conn.sock.prepareIo()
|
||||
c, err := conn.sock.prepareIO()
|
||||
if err != nil {
|
||||
return 0, conn.opErr("write", err)
|
||||
}
|
||||
@@ -238,10 +485,11 @@ func (conn *HvsockConn) write(b []byte) (int, error) {
|
||||
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
|
||||
var bytes uint32
|
||||
err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
|
||||
n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
|
||||
n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err)
|
||||
if err != nil {
|
||||
if _, ok := err.(syscall.Errno); ok {
|
||||
err = os.NewSyscallError("wsasend", err)
|
||||
var eno windows.Errno
|
||||
if errors.As(err, &eno) {
|
||||
err = os.NewSyscallError("wsasend", eno)
|
||||
}
|
||||
return 0, conn.opErr("write", err)
|
||||
}
|
||||
@@ -257,13 +505,19 @@ func (conn *HvsockConn) IsClosed() bool {
|
||||
return conn.sock.IsClosed()
|
||||
}
|
||||
|
||||
// shutdown disables sending or receiving on a socket.
|
||||
func (conn *HvsockConn) shutdown(how int) error {
|
||||
if conn.IsClosed() {
|
||||
return ErrFileClosed
|
||||
return socket.ErrSocketClosed
|
||||
}
|
||||
|
||||
err := syscall.Shutdown(conn.sock.handle, how)
|
||||
if err != nil {
|
||||
// If the connection was closed, shutdowns fail with "not connected"
|
||||
if errors.Is(err, windows.WSAENOTCONN) ||
|
||||
errors.Is(err, windows.WSAESHUTDOWN) {
|
||||
err = socket.ErrSocketClosed
|
||||
}
|
||||
return os.NewSyscallError("shutdown", err)
|
||||
}
|
||||
return nil
|
||||
@@ -273,7 +527,7 @@ func (conn *HvsockConn) shutdown(how int) error {
|
||||
func (conn *HvsockConn) CloseRead() error {
|
||||
err := conn.shutdown(syscall.SHUT_RD)
|
||||
if err != nil {
|
||||
return conn.opErr("close", err)
|
||||
return conn.opErr("closeread", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -283,7 +537,7 @@ func (conn *HvsockConn) CloseRead() error {
|
||||
func (conn *HvsockConn) CloseWrite() error {
|
||||
err := conn.shutdown(syscall.SHUT_WR)
|
||||
if err != nil {
|
||||
return conn.opErr("close", err)
|
||||
return conn.opErr("closewrite", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -300,8 +554,13 @@ func (conn *HvsockConn) RemoteAddr() net.Addr {
|
||||
|
||||
// SetDeadline implements the net.Conn SetDeadline method.
|
||||
func (conn *HvsockConn) SetDeadline(t time.Time) error {
|
||||
conn.SetReadDeadline(t)
|
||||
conn.SetWriteDeadline(t)
|
||||
// todo: implement `SetDeadline` for `win32File`
|
||||
if err := conn.SetReadDeadline(t); err != nil {
|
||||
return fmt.Errorf("set read deadline: %w", err)
|
||||
}
|
||||
if err := conn.SetWriteDeadline(t); err != nil {
|
||||
return fmt.Errorf("set write deadline: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
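To tie the new hvsock surface together, a minimal sketch of the HvsockDialer added above: build an address from an AF_VSOCK port, then dial with retries. Every identifier used here (HvsockAddr, HvsockGUIDLoopback, VsockServiceID, HvsockDialer, Dial) appears in the hunks above; the port and timing values are arbitrary.

```go
package main

import (
	"context"
	"log"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	addr := &winio.HvsockAddr{
		VMID:      winio.HvsockGUIDLoopback(), // connect within the local partition
		ServiceID: winio.VsockServiceID(5000), // AF_VSOCK port 5000 mapped to a service GUID
	}

	d := &winio.HvsockDialer{
		Deadline:  time.Now().Add(10 * time.Second),
		Retries:   3,           // retry on refused/unreachable/timed-out connects
		RetryWait: time.Second, // wait between attempts
	}

	conn, err := d.Dial(context.Background(), addr)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if _, err := conn.Write([]byte("ping\n")); err != nil {
		log.Fatal(err)
	}
}
```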
20 vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go (generated, vendored, new file)
@@ -0,0 +1,20 @@
package socket

import (
	"unsafe"
)

// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The
// struct must meet the Win32 sockaddr requirements specified here:
// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2
//
// Specifically, the struct size must be least larger than an int16 (unsigned short)
// for the address family.
type RawSockaddr interface {
	// Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing
	// for the RawSockaddr's data to be overwritten by syscalls (if necessary).
	//
	// It is the callers responsibility to validate that the values are valid; invalid
	// pointers or size can cause a panic.
	Sockaddr() (unsafe.Pointer, int32, error)
}
179 vendor/github.com/Microsoft/go-winio/internal/socket/socket.go (generated, vendored, new file)
@@ -0,0 +1,179 @@
|
||||
//go:build windows
|
||||
|
||||
package socket
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/Microsoft/go-winio/pkg/guid"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go
|
||||
|
||||
//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname
|
||||
//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername
|
||||
//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
|
||||
|
||||
const socketError = uintptr(^uint32(0))
|
||||
|
||||
var (
|
||||
// todo(helsaawy): create custom error types to store the desired vs actual size and addr family?
|
||||
|
||||
ErrBufferSize = errors.New("buffer size")
|
||||
ErrAddrFamily = errors.New("address family")
|
||||
ErrInvalidPointer = errors.New("invalid pointer")
|
||||
ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed)
|
||||
)
|
||||
|
||||
// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error)
|
||||
|
||||
// GetSockName writes the local address of socket s to the [RawSockaddr] rsa.
|
||||
// If rsa is not large enough, the [windows.WSAEFAULT] is returned.
|
||||
func GetSockName(s windows.Handle, rsa RawSockaddr) error {
|
||||
ptr, l, err := rsa.Sockaddr()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
|
||||
}
|
||||
|
||||
// although getsockname returns WSAEFAULT if the buffer is too small, it does not set
|
||||
// &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy
|
||||
return getsockname(s, ptr, &l)
|
||||
}
|
||||
|
||||
// GetPeerName returns the remote address the socket is connected to.
|
||||
//
|
||||
// See [GetSockName] for more information.
|
||||
func GetPeerName(s windows.Handle, rsa RawSockaddr) error {
|
||||
ptr, l, err := rsa.Sockaddr()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
|
||||
}
|
||||
|
||||
return getpeername(s, ptr, &l)
|
||||
}
|
||||
|
||||
func Bind(s windows.Handle, rsa RawSockaddr) (err error) {
|
||||
ptr, l, err := rsa.Sockaddr()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
|
||||
}
|
||||
|
||||
return bind(s, ptr, l)
|
||||
}
|
||||
|
||||
// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the
|
||||
// their sockaddr interface, so they cannot be used with HvsockAddr
|
||||
// Replicate functionality here from
|
||||
// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go
|
||||
|
||||
// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at
|
||||
// runtime via a WSAIoctl call:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks
|
||||
|
||||
type runtimeFunc struct {
|
||||
id guid.GUID
|
||||
once sync.Once
|
||||
addr uintptr
|
||||
err error
|
||||
}
|
||||
|
||||
func (f *runtimeFunc) Load() error {
|
||||
f.once.Do(func() {
|
||||
var s windows.Handle
|
||||
s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP)
|
||||
if f.err != nil {
|
||||
return
|
||||
}
|
||||
defer windows.CloseHandle(s) //nolint:errcheck
|
||||
|
||||
var n uint32
|
||||
f.err = windows.WSAIoctl(s,
|
||||
windows.SIO_GET_EXTENSION_FUNCTION_POINTER,
|
||||
(*byte)(unsafe.Pointer(&f.id)),
|
||||
uint32(unsafe.Sizeof(f.id)),
|
||||
(*byte)(unsafe.Pointer(&f.addr)),
|
||||
uint32(unsafe.Sizeof(f.addr)),
|
||||
&n,
|
||||
nil, //overlapped
|
||||
0, //completionRoutine
|
||||
)
|
||||
})
|
||||
return f.err
|
||||
}
|
||||
|
||||
var (
|
||||
// todo: add `AcceptEx` and `GetAcceptExSockaddrs`
|
||||
WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS
|
||||
Data1: 0x25a207b9,
|
||||
Data2: 0xddf3,
|
||||
Data3: 0x4660,
|
||||
Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
|
||||
}
|
||||
|
||||
connectExFunc = runtimeFunc{id: WSAID_CONNECTEX}
|
||||
)
|
||||
|
||||
func ConnectEx(
|
||||
fd windows.Handle,
|
||||
rsa RawSockaddr,
|
||||
sendBuf *byte,
|
||||
sendDataLen uint32,
|
||||
bytesSent *uint32,
|
||||
overlapped *windows.Overlapped,
|
||||
) error {
|
||||
if err := connectExFunc.Load(); err != nil {
|
||||
return fmt.Errorf("failed to load ConnectEx function pointer: %w", err)
|
||||
}
|
||||
ptr, n, err := rsa.Sockaddr()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped)
|
||||
}
|
||||
|
||||
// BOOL LpfnConnectex(
|
||||
// [in] SOCKET s,
|
||||
// [in] const sockaddr *name,
|
||||
// [in] int namelen,
|
||||
// [in, optional] PVOID lpSendBuffer,
|
||||
// [in] DWORD dwSendDataLength,
|
||||
// [out] LPDWORD lpdwBytesSent,
|
||||
// [in] LPOVERLAPPED lpOverlapped
|
||||
// )
|
||||
|
||||
func connectEx(
|
||||
s windows.Handle,
|
||||
name unsafe.Pointer,
|
||||
namelen int32,
|
||||
sendBuf *byte,
|
||||
sendDataLen uint32,
|
||||
bytesSent *uint32,
|
||||
overlapped *windows.Overlapped,
|
||||
) (err error) {
|
||||
// todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN
|
||||
r1, _, e1 := syscall.Syscall9(connectExFunc.addr,
|
||||
7,
|
||||
uintptr(s),
|
||||
uintptr(name),
|
||||
uintptr(namelen),
|
||||
uintptr(unsafe.Pointer(sendBuf)),
|
||||
uintptr(sendDataLen),
|
||||
uintptr(unsafe.Pointer(bytesSent)),
|
||||
uintptr(unsafe.Pointer(overlapped)),
|
||||
0,
|
||||
0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = error(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
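The comment block above explains that ConnectEx (like AcceptEx and GetAcceptExSockaddrs) has no import-library entry and must be fetched at runtime through WSAIoctl with SIO_GET_EXTENSION_FUNCTION_POINTER. As an illustrative, standalone sketch of that pattern (not part of this diff), the snippet below performs the same lookup directly against golang.org/x/sys/windows; the GUID literal is the WSAID_CONNECTEX value used above.

//go:build windows

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	// A throwaway TCP socket is enough to issue the extension-function query.
	s, err := windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP)
	if err != nil {
		panic(err)
	}
	defer windows.CloseHandle(s) //nolint:errcheck

	// WSAID_CONNECTEX, as published in the Winsock headers.
	id := windows.GUID{
		Data1: 0x25a207b9,
		Data2: 0xddf3,
		Data3: 0x4660,
		Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
	}

	var (
		addr uintptr // receives the ConnectEx function pointer
		n    uint32
	)
	err = windows.WSAIoctl(s,
		windows.SIO_GET_EXTENSION_FUNCTION_POINTER,
		(*byte)(unsafe.Pointer(&id)),
		uint32(unsafe.Sizeof(id)),
		(*byte)(unsafe.Pointer(&addr)),
		uint32(unsafe.Sizeof(addr)),
		&n,
		nil, // overlapped
		0,   // completion routine
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("ConnectEx entry point: %#x\n", addr)
}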
72  vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go  generated  vendored  Normal file
@@ -0,0 +1,72 @@
|
||||
//go:build windows
|
||||
|
||||
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
|
||||
|
||||
package socket
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
||||
errERROR_EINVAL error = syscall.EINVAL
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e syscall.Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return errERROR_EINVAL
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values see on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
|
||||
|
||||
procbind = modws2_32.NewProc("bind")
|
||||
procgetpeername = modws2_32.NewProc("getpeername")
|
||||
procgetsockname = modws2_32.NewProc("getsockname")
|
||||
)
|
||||
|
||||
func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
|
||||
if r1 == socketError {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
|
||||
if r1 == socketError {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
|
||||
if r1 == socketError {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
124  vendor/github.com/Microsoft/go-winio/pipe.go  generated  vendored
@@ -1,3 +1,4 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
@@ -13,6 +14,8 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
|
||||
@@ -21,10 +24,10 @@ import (
|
||||
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
|
||||
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
|
||||
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
|
||||
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
|
||||
//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
|
||||
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
|
||||
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl
|
||||
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile
|
||||
//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
|
||||
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U
|
||||
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl
|
||||
|
||||
type ioStatusBlock struct {
|
||||
Status, Information uintptr
|
||||
@@ -51,45 +54,22 @@ type securityDescriptor struct {
|
||||
Control uint16
|
||||
Owner uintptr
|
||||
Group uintptr
|
||||
Sacl uintptr
|
||||
Dacl uintptr
|
||||
Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl
|
||||
Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl
|
||||
}
|
||||
|
||||
type ntstatus int32
|
||||
type ntStatus int32
|
||||
|
||||
func (status ntstatus) Err() error {
|
||||
func (status ntStatus) Err() error {
|
||||
if status >= 0 {
|
||||
return nil
|
||||
}
|
||||
return rtlNtStatusToDosError(status)
|
||||
}
|
||||
|
||||
const (
|
||||
cERROR_PIPE_BUSY = syscall.Errno(231)
|
||||
cERROR_NO_DATA = syscall.Errno(232)
|
||||
cERROR_PIPE_CONNECTED = syscall.Errno(535)
|
||||
cERROR_SEM_TIMEOUT = syscall.Errno(121)
|
||||
|
||||
cSECURITY_SQOS_PRESENT = 0x100000
|
||||
cSECURITY_ANONYMOUS = 0
|
||||
|
||||
cPIPE_TYPE_MESSAGE = 4
|
||||
|
||||
cPIPE_READMODE_MESSAGE = 2
|
||||
|
||||
cFILE_OPEN = 1
|
||||
cFILE_CREATE = 2
|
||||
|
||||
cFILE_PIPE_MESSAGE_TYPE = 1
|
||||
cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2
|
||||
|
||||
cSE_DACL_PRESENT = 4
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
|
||||
// This error should match net.errClosing since docker takes a dependency on its text.
|
||||
ErrPipeListenerClosed = errors.New("use of closed network connection")
|
||||
ErrPipeListenerClosed = net.ErrClosed
|
||||
|
||||
errPipeWriteClosed = errors.New("pipe has been closed for write")
|
||||
)
|
||||
@@ -116,9 +96,10 @@ func (f *win32Pipe) RemoteAddr() net.Addr {
|
||||
}
|
||||
|
||||
func (f *win32Pipe) SetDeadline(t time.Time) error {
|
||||
f.SetReadDeadline(t)
|
||||
f.SetWriteDeadline(t)
|
||||
return nil
|
||||
if err := f.SetReadDeadline(t); err != nil {
|
||||
return err
|
||||
}
|
||||
return f.SetWriteDeadline(t)
|
||||
}
|
||||
|
||||
// CloseWrite closes the write side of a message pipe in byte mode.
|
||||
@@ -157,14 +138,14 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n, err := f.win32File.Read(b)
|
||||
if err == io.EOF {
|
||||
if err == io.EOF { //nolint:errorlint
|
||||
// If this was the result of a zero-byte read, then
|
||||
// it is possible that the read was due to a zero-size
|
||||
// message. Since we are simulating CloseWrite with a
|
||||
// zero-byte message, ensure that all future Read() calls
|
||||
// also return EOF.
|
||||
f.readEOF = true
|
||||
} else if err == syscall.ERROR_MORE_DATA {
|
||||
} else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno
|
||||
// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
|
||||
// and the message still has more bytes. Treat this as a success, since
|
||||
// this package presents all named pipes as byte streams.
|
||||
@@ -173,7 +154,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (s pipeAddress) Network() string {
|
||||
func (pipeAddress) Network() string {
|
||||
return "pipe"
|
||||
}
|
||||
|
||||
@@ -184,16 +165,21 @@ func (s pipeAddress) String() string {
|
||||
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
|
||||
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
|
||||
for {
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return syscall.Handle(0), ctx.Err()
|
||||
default:
|
||||
h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
||||
h, err := createFile(*path,
|
||||
access,
|
||||
0,
|
||||
nil,
|
||||
syscall.OPEN_EXISTING,
|
||||
windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS,
|
||||
0)
|
||||
if err == nil {
|
||||
return h, nil
|
||||
}
|
||||
if err != cERROR_PIPE_BUSY {
|
||||
if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno
|
||||
return h, &os.PathError{Err: err, Op: "open", Path: *path}
|
||||
}
|
||||
// Wait 10 msec and try again. This is a rather simplistic
|
||||
@@ -213,9 +199,10 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
|
||||
} else {
|
||||
absTimeout = time.Now().Add(2 * time.Second)
|
||||
}
|
||||
ctx, _ := context.WithDeadline(context.Background(), absTimeout)
|
||||
ctx, cancel := context.WithDeadline(context.Background(), absTimeout)
|
||||
defer cancel()
|
||||
conn, err := DialPipeContext(ctx, path)
|
||||
if err == context.DeadlineExceeded {
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return nil, ErrTimeout
|
||||
}
|
||||
return conn, err
|
||||
@@ -251,7 +238,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn,
|
||||
|
||||
// If the pipe is in message mode, return a message byte pipe, which
|
||||
// supports CloseWrite().
|
||||
if flags&cPIPE_TYPE_MESSAGE != 0 {
|
||||
if flags&windows.PIPE_TYPE_MESSAGE != 0 {
|
||||
return &win32MessageBytePipe{
|
||||
win32Pipe: win32Pipe{win32File: f, path: path},
|
||||
}, nil
|
||||
@@ -283,7 +270,11 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
|
||||
oa.Length = unsafe.Sizeof(oa)
|
||||
|
||||
var ntPath unicodeString
|
||||
if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
|
||||
if err := rtlDosPathNameToNtPathName(&path16[0],
|
||||
&ntPath,
|
||||
0,
|
||||
0,
|
||||
).Err(); err != nil {
|
||||
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
defer localFree(ntPath.Buffer)
|
||||
@@ -292,8 +283,8 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
|
||||
// The security descriptor is only needed for the first pipe.
|
||||
if first {
|
||||
if sd != nil {
|
||||
len := uint32(len(sd))
|
||||
sdb := localAlloc(0, len)
|
||||
l := uint32(len(sd))
|
||||
sdb := localAlloc(0, l)
|
||||
defer localFree(sdb)
|
||||
copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
|
||||
oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
|
||||
@@ -301,28 +292,28 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
|
||||
// Construct the default named pipe security descriptor.
|
||||
var dacl uintptr
|
||||
if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
|
||||
return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
|
||||
return 0, fmt.Errorf("getting default named pipe ACL: %w", err)
|
||||
}
|
||||
defer localFree(dacl)
|
||||
|
||||
sdb := &securityDescriptor{
|
||||
Revision: 1,
|
||||
Control: cSE_DACL_PRESENT,
|
||||
Control: windows.SE_DACL_PRESENT,
|
||||
Dacl: dacl,
|
||||
}
|
||||
oa.SecurityDescriptor = sdb
|
||||
}
|
||||
}
|
||||
|
||||
typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
|
||||
typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS)
|
||||
if c.MessageMode {
|
||||
typ |= cFILE_PIPE_MESSAGE_TYPE
|
||||
typ |= windows.FILE_PIPE_MESSAGE_TYPE
|
||||
}
|
||||
|
||||
disposition := uint32(cFILE_OPEN)
|
||||
disposition := uint32(windows.FILE_OPEN)
|
||||
access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
|
||||
if first {
|
||||
disposition = cFILE_CREATE
|
||||
disposition = windows.FILE_CREATE
|
||||
// By not asking for read or write access, the named pipe file system
|
||||
// will put this pipe into an initially disconnected state, blocking
|
||||
// client connections until the next call with first == false.
|
||||
@@ -335,7 +326,20 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
|
||||
h syscall.Handle
|
||||
iosb ioStatusBlock
|
||||
)
|
||||
err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
|
||||
err = ntCreateNamedPipeFile(&h,
|
||||
access,
|
||||
&oa,
|
||||
&iosb,
|
||||
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE,
|
||||
disposition,
|
||||
0,
|
||||
typ,
|
||||
0,
|
||||
0,
|
||||
0xffffffff,
|
||||
uint32(c.InputBufferSize),
|
||||
uint32(c.OutputBufferSize),
|
||||
&timeout).Err()
|
||||
if err != nil {
|
||||
return 0, &os.PathError{Op: "open", Path: path, Err: err}
|
||||
}
|
||||
@@ -380,7 +384,7 @@ func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
|
||||
p.Close()
|
||||
p = nil
|
||||
err = <-ch
|
||||
if err == nil || err == ErrFileClosed {
|
||||
if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno
|
||||
err = ErrPipeListenerClosed
|
||||
}
|
||||
}
|
||||
@@ -402,12 +406,12 @@ func (l *win32PipeListener) listenerRoutine() {
|
||||
p, err = l.makeConnectedServerPipe()
|
||||
// If the connection was immediately closed by the client, try
|
||||
// again.
|
||||
if err != cERROR_NO_DATA {
|
||||
if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno
|
||||
break
|
||||
}
|
||||
}
|
||||
responseCh <- acceptResponse{p, err}
|
||||
closed = err == ErrPipeListenerClosed
|
||||
closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno
|
||||
}
|
||||
}
|
||||
syscall.Close(l.firstHandle)
|
||||
@@ -469,15 +473,15 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
|
||||
}
|
||||
|
||||
func connectPipe(p *win32File) error {
|
||||
c, err := p.prepareIo()
|
||||
c, err := p.prepareIO()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer p.wg.Done()
|
||||
|
||||
err = connectNamedPipe(p.handle, &c.o)
|
||||
_, err = p.asyncIo(c, nil, 0, err)
|
||||
if err != nil && err != cERROR_PIPE_CONNECTED {
|
||||
_, err = p.asyncIO(c, nil, 0, err)
|
||||
if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
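For orientation, here is a hedged sketch of how the public pipe API backed by this file is typically used; the pipe name and message are made up, and error handling is kept minimal.

//go:build windows

package main

import (
	"fmt"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	const pipePath = `\\.\pipe\go-winio-example` // hypothetical pipe name

	// Server side: listen and answer the first connection.
	l, err := winio.ListenPipe(pipePath, nil)
	if err != nil {
		panic(err)
	}
	defer l.Close()
	go func() {
		c, err := l.Accept()
		if err != nil {
			return
		}
		defer c.Close()
		_, _ = c.Write([]byte("hello from the pipe server\n"))
	}()

	// Client side: DialPipe keeps retrying on ERROR_PIPE_BUSY until the timeout expires.
	timeout := 2 * time.Second
	conn, err := winio.DialPipe(pipePath, &timeout)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := make([]byte, 64)
	n, err := conn.Read(buf)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(buf[:n]))
}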
16  vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go  generated  vendored
@@ -1,5 +1,3 @@
|
||||
// +build windows
|
||||
|
||||
// Package guid provides a GUID type. The backing structure for a GUID is
|
||||
// identical to that used by the golang.org/x/sys/windows GUID type.
|
||||
// There are two main binary encodings used for a GUID, the big-endian encoding,
|
||||
@@ -9,24 +7,26 @@ package guid
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"crypto/sha1" //nolint:gosec // not used for secure application
|
||||
"encoding"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment
|
||||
|
||||
// Variant specifies which GUID variant (or "type") of the GUID. It determines
|
||||
// how the entirety of the rest of the GUID is interpreted.
|
||||
type Variant uint8
|
||||
|
||||
// The variants specified by RFC 4122.
|
||||
// The variants specified by RFC 4122 section 4.1.1.
|
||||
const (
|
||||
// VariantUnknown specifies a GUID variant which does not conform to one of
|
||||
// the variant encodings specified in RFC 4122.
|
||||
VariantUnknown Variant = iota
|
||||
VariantNCS
|
||||
VariantRFC4122
|
||||
VariantRFC4122 // RFC 4122
|
||||
VariantMicrosoft
|
||||
VariantFuture
|
||||
)
|
||||
@@ -36,6 +36,10 @@ const (
|
||||
// hash of an input string.
|
||||
type Version uint8
|
||||
|
||||
func (v Version) String() string {
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
}
|
||||
|
||||
var _ = (encoding.TextMarshaler)(GUID{})
|
||||
var _ = (encoding.TextUnmarshaler)(&GUID{})
|
||||
|
||||
@@ -61,7 +65,7 @@ func NewV4() (GUID, error) {
|
||||
// big-endian UTF16 stream of bytes. If that is desired, the string can be
|
||||
// encoded as such before being passed to this function.
|
||||
func NewV5(namespace GUID, name []byte) (GUID, error) {
|
||||
b := sha1.New()
|
||||
b := sha1.New() //nolint:gosec // not used for secure application
|
||||
namespaceBytes := namespace.ToArray()
|
||||
b.Write(namespaceBytes[:])
|
||||
b.Write(name)
|
||||
|
||||
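A short usage sketch of this package's public constructors; the namespace GUID below is the standard RFC 4122 DNS namespace and the name is arbitrary. Note that, with the build tag removed above, the package is no longer Windows-only.

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// Random (version 4) GUID.
	g4, err := guid.NewV4()
	if err != nil {
		panic(err)
	}

	// Name-based (version 5) GUID derived from a namespace GUID and a name.
	ns, err := guid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") // RFC 4122 DNS namespace
	if err != nil {
		panic(err)
	}
	g5, err := guid.NewV5(ns, []byte("example.org"))
	if err != nil {
		panic(err)
	}

	// With the generated stringer, the variant prints as "RFC 4122".
	fmt.Println(g4, g5, g5.Variant(), g5.Version())
}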
1  vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go  generated  vendored
@@ -1,3 +1,4 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package guid
|
||||
|
||||
3  vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go  generated  vendored
@@ -1,3 +1,6 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package guid
|
||||
|
||||
import "golang.org/x/sys/windows"
|
||||
|
||||
27  vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go  generated  vendored  Normal file
@@ -0,0 +1,27 @@
|
||||
// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT.
|
||||
|
||||
package guid
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[VariantUnknown-0]
|
||||
_ = x[VariantNCS-1]
|
||||
_ = x[VariantRFC4122-2]
|
||||
_ = x[VariantMicrosoft-3]
|
||||
_ = x[VariantFuture-4]
|
||||
}
|
||||
|
||||
const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture"
|
||||
|
||||
var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33}
|
||||
|
||||
func (i Variant) String() string {
|
||||
if i >= Variant(len(_Variant_index)-1) {
|
||||
return "Variant(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _Variant_name[_Variant_index[i]:_Variant_index[i+1]]
|
||||
}
|
||||
33  vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go  generated  vendored
@@ -1,3 +1,4 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package security
|
||||
@@ -20,6 +21,7 @@ type (
|
||||
trusteeForm uint32
|
||||
trusteeType uint32
|
||||
|
||||
//nolint:structcheck // structcheck thinks fields are unused, but the are used to pass data to OS
|
||||
explicitAccess struct {
|
||||
accessPermissions accessMask
|
||||
accessMode accessMode
|
||||
@@ -27,6 +29,7 @@ type (
|
||||
trustee trustee
|
||||
}
|
||||
|
||||
//nolint:structcheck,unused // structcheck thinks fields are unused, but the are used to pass data to OS
|
||||
trustee struct {
|
||||
multipleTrustee *trustee
|
||||
multipleTrusteeOperation int32
|
||||
@@ -44,6 +47,7 @@ const (
|
||||
desiredAccessReadControl desiredAccess = 0x20000
|
||||
desiredAccessWriteDac desiredAccess = 0x40000
|
||||
|
||||
//cspell:disable-next-line
|
||||
gvmga = "GrantVmGroupAccess:"
|
||||
|
||||
inheritModeNoInheritance inheritMode = 0x0
|
||||
@@ -56,9 +60,9 @@ const (
|
||||
shareModeRead shareMode = 0x1
|
||||
shareModeWrite shareMode = 0x2
|
||||
|
||||
sidVmGroup = "S-1-5-83-0"
|
||||
sidVMGroup = "S-1-5-83-0"
|
||||
|
||||
trusteeFormIsSid trusteeForm = 0
|
||||
trusteeFormIsSID trusteeForm = 0
|
||||
|
||||
trusteeTypeWellKnownGroup trusteeType = 5
|
||||
)
|
||||
@@ -67,6 +71,8 @@ const (
|
||||
// include Grant ACE entries for the VM Group SID. This is a golang re-
|
||||
// implementation of the same function in vmcompute, just not exported in
|
||||
// RS5. Which kind of sucks. Sucks a lot :/
|
||||
//
|
||||
//revive:disable-next-line:var-naming VM, not Vm
|
||||
func GrantVmGroupAccess(name string) error {
|
||||
// Stat (to determine if `name` is a directory).
|
||||
s, err := os.Stat(name)
|
||||
@@ -79,7 +85,7 @@ func GrantVmGroupAccess(name string) error {
|
||||
if err != nil {
|
||||
return err // Already wrapped
|
||||
}
|
||||
defer syscall.CloseHandle(fd)
|
||||
defer syscall.CloseHandle(fd) //nolint:errcheck
|
||||
|
||||
// Get the current DACL and Security Descriptor. Must defer LocalFree on success.
|
||||
ot := objectTypeFileObject
|
||||
@@ -89,7 +95,7 @@ func GrantVmGroupAccess(name string) error {
|
||||
if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
|
||||
return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err)
|
||||
}
|
||||
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))
|
||||
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) //nolint:errcheck
|
||||
|
||||
// Generate a new DACL which is the current DACL with the required ACEs added.
|
||||
// Must defer LocalFree on success.
|
||||
@@ -97,7 +103,7 @@ func GrantVmGroupAccess(name string) error {
|
||||
if err != nil {
|
||||
return err // Already wrapped
|
||||
}
|
||||
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL)))
|
||||
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) //nolint:errcheck
|
||||
|
||||
// And finally use SetSecurityInfo to apply the updated DACL.
|
||||
if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
|
||||
@@ -110,16 +116,19 @@ func GrantVmGroupAccess(name string) error {
|
||||
// createFile is a helper function to call [Nt]CreateFile to get a handle to
|
||||
// the file or directory.
|
||||
func createFile(name string, isDir bool) (syscall.Handle, error) {
|
||||
namep := syscall.StringToUTF16(name)
|
||||
namep, err := syscall.UTF16FromString(name)
|
||||
if err != nil {
|
||||
return syscall.InvalidHandle, fmt.Errorf("could not convert name to UTF-16: %w", err)
|
||||
}
|
||||
da := uint32(desiredAccessReadControl | desiredAccessWriteDac)
|
||||
sm := uint32(shareModeRead | shareModeWrite)
|
||||
fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL)
|
||||
if isDir {
|
||||
fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS)
|
||||
fa |= syscall.FILE_FLAG_BACKUP_SEMANTICS
|
||||
}
|
||||
fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err)
|
||||
return syscall.InvalidHandle, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err)
|
||||
}
|
||||
return fd, nil
|
||||
}
|
||||
@@ -128,9 +137,9 @@ func createFile(name string, isDir bool) (syscall.Handle, error) {
|
||||
// The caller is responsible for LocalFree of the returned DACL on success.
|
||||
func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) {
|
||||
// Generate pointers to the SIDs based on the string SIDs
|
||||
sid, err := syscall.StringToSid(sidVmGroup)
|
||||
sid, err := syscall.StringToSid(sidVMGroup)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err)
|
||||
return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVMGroup, err)
|
||||
}
|
||||
|
||||
inheritance := inheritModeNoInheritance
|
||||
@@ -139,12 +148,12 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp
|
||||
}
|
||||
|
||||
eaArray := []explicitAccess{
|
||||
explicitAccess{
|
||||
{
|
||||
accessPermissions: accessMaskDesiredPermission,
|
||||
accessMode: accessModeGrant,
|
||||
inheritance: inheritance,
|
||||
trustee: trustee{
|
||||
trusteeForm: trusteeFormIsSid,
|
||||
trusteeForm: trusteeFormIsSID,
|
||||
trusteeType: trusteeTypeWellKnownGroup,
|
||||
name: uintptr(unsafe.Pointer(sid)),
|
||||
},
|
||||
|
||||
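A minimal usage sketch of the exported helper; the image path is hypothetical.

//go:build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio/pkg/security"
)

func main() {
	// Hypothetical disk image that a utility VM needs to be able to open.
	const imagePath = `C:\images\utility.vhdx`

	// Adds grant ACEs for the VM group SID (S-1-5-83-0) to the file's DACL.
	if err := security.GrantVmGroupAccess(imagePath); err != nil {
		log.Fatalf("grant VM group access: %v", err)
	}
}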
2  vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go  generated  vendored
@@ -1,6 +1,6 @@
|
||||
package security
|
||||
|
||||
//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go
|
||||
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go syscall_windows.go
|
||||
|
||||
//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo
|
||||
//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo
|
||||
|
||||
4  vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go  generated  vendored
@@ -1,4 +1,6 @@
|
||||
// Code generated by 'go generate'; DO NOT EDIT.
|
||||
//go:build windows
|
||||
|
||||
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
|
||||
|
||||
package security
|
||||
|
||||
|
||||
32  vendor/github.com/Microsoft/go-winio/privilege.go  generated  vendored
@@ -1,3 +1,4 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
@@ -24,22 +25,17 @@ import (
|
||||
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
|
||||
|
||||
const (
|
||||
SE_PRIVILEGE_ENABLED = 2
|
||||
//revive:disable-next-line:var-naming ALL_CAPS
|
||||
SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED
|
||||
|
||||
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
|
||||
//revive:disable-next-line:var-naming ALL_CAPS
|
||||
ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED
|
||||
|
||||
SeBackupPrivilege = "SeBackupPrivilege"
|
||||
SeRestorePrivilege = "SeRestorePrivilege"
|
||||
SeSecurityPrivilege = "SeSecurityPrivilege"
|
||||
)
|
||||
|
||||
const (
|
||||
securityAnonymous = iota
|
||||
securityIdentification
|
||||
securityImpersonation
|
||||
securityDelegation
|
||||
)
|
||||
|
||||
var (
|
||||
privNames = make(map[string]uint64)
|
||||
privNameMutex sync.Mutex
|
||||
@@ -51,11 +47,9 @@ type PrivilegeError struct {
|
||||
}
|
||||
|
||||
func (e *PrivilegeError) Error() string {
|
||||
s := ""
|
||||
s := "Could not enable privilege "
|
||||
if len(e.privileges) > 1 {
|
||||
s = "Could not enable privileges "
|
||||
} else {
|
||||
s = "Could not enable privilege "
|
||||
}
|
||||
for i, p := range e.privileges {
|
||||
if i != 0 {
|
||||
@@ -94,7 +88,7 @@ func RunWithPrivileges(names []string, fn func() error) error {
|
||||
}
|
||||
|
||||
func mapPrivileges(names []string) ([]uint64, error) {
|
||||
var privileges []uint64
|
||||
privileges := make([]uint64, 0, len(names))
|
||||
privNameMutex.Lock()
|
||||
defer privNameMutex.Unlock()
|
||||
for _, name := range names {
|
||||
@@ -127,7 +121,7 @@ func enableDisableProcessPrivilege(names []string, action uint32) error {
|
||||
return err
|
||||
}
|
||||
|
||||
p, _ := windows.GetCurrentProcess()
|
||||
p := windows.CurrentProcess()
|
||||
var token windows.Token
|
||||
err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
|
||||
if err != nil {
|
||||
@@ -140,10 +134,10 @@ func enableDisableProcessPrivilege(names []string, action uint32) error {
|
||||
|
||||
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
|
||||
var b bytes.Buffer
|
||||
binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
|
||||
_ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
|
||||
for _, p := range privileges {
|
||||
binary.Write(&b, binary.LittleEndian, p)
|
||||
binary.Write(&b, binary.LittleEndian, action)
|
||||
_ = binary.Write(&b, binary.LittleEndian, p)
|
||||
_ = binary.Write(&b, binary.LittleEndian, action)
|
||||
}
|
||||
prevState := make([]byte, b.Len())
|
||||
reqSize := uint32(0)
|
||||
@@ -151,7 +145,7 @@ func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) e
|
||||
if !success {
|
||||
return err
|
||||
}
|
||||
if err == ERROR_NOT_ALL_ASSIGNED {
|
||||
if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno
|
||||
return &PrivilegeError{privileges}
|
||||
}
|
||||
return nil
|
||||
@@ -177,7 +171,7 @@ func getPrivilegeName(luid uint64) string {
|
||||
}
|
||||
|
||||
func newThreadToken() (windows.Token, error) {
|
||||
err := impersonateSelf(securityImpersonation)
|
||||
err := impersonateSelf(windows.SecurityImpersonation)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
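These helpers are normally consumed through RunWithPrivileges, which enables the named privileges on the calling thread for the duration of the callback. A minimal sketch; the path is just an example of something that may require SeBackupPrivilege.

//go:build windows

package main

import (
	"log"
	"os"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	err := winio.RunWithPrivileges([]string{winio.SeBackupPrivilege}, func() error {
		// Runs with SeBackupPrivilege enabled on this thread.
		f, err := os.Open(`C:\Windows\System32\config`)
		if err != nil {
			return err
		}
		return f.Close()
	})
	if err != nil {
		log.Fatal(err)
	}
}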
11  vendor/github.com/Microsoft/go-winio/reparse.go  generated  vendored
@@ -1,3 +1,6 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
@@ -113,16 +116,16 @@ func EncodeReparsePoint(rp *ReparsePoint) []byte {
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
binary.Write(&b, binary.LittleEndian, &data)
|
||||
_ = binary.Write(&b, binary.LittleEndian, &data)
|
||||
if !rp.IsMountPoint {
|
||||
flags := uint32(0)
|
||||
if relative {
|
||||
flags |= 1
|
||||
}
|
||||
binary.Write(&b, binary.LittleEndian, flags)
|
||||
_ = binary.Write(&b, binary.LittleEndian, flags)
|
||||
}
|
||||
|
||||
binary.Write(&b, binary.LittleEndian, ntTarget16)
|
||||
binary.Write(&b, binary.LittleEndian, target16)
|
||||
_ = binary.Write(&b, binary.LittleEndian, ntTarget16)
|
||||
_ = binary.Write(&b, binary.LittleEndian, target16)
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
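A hedged sketch of the encode/decode round trip this file implements; the target path is made up, and DecodeReparsePoint is assumed to be the matching decoder exported by the package.

//go:build windows

package main

import (
	"fmt"
	"log"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	rp := winio.ReparsePoint{
		Target:       `C:\some\target`, // hypothetical link target
		IsMountPoint: false,            // false encodes a symbolic-link buffer
	}

	// Encode into a REPARSE_DATA_BUFFER layout and decode it back.
	buf := winio.EncodeReparsePoint(&rp)
	decoded, err := winio.DecodeReparsePoint(buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.Target, decoded.IsMountPoint)
}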
64  vendor/github.com/Microsoft/go-winio/sd.go  generated  vendored
@@ -1,23 +1,25 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
|
||||
//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW
|
||||
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
|
||||
//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW
|
||||
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
|
||||
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
|
||||
//sys localFree(mem uintptr) = LocalFree
|
||||
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
|
||||
|
||||
const (
|
||||
cERROR_NONE_MAPPED = syscall.Errno(1332)
|
||||
)
|
||||
|
||||
type AccountLookupError struct {
|
||||
Name string
|
||||
Err error
|
||||
@@ -28,8 +30,10 @@ func (e *AccountLookupError) Error() string {
|
||||
return "lookup account: empty account name specified"
|
||||
}
|
||||
var s string
|
||||
switch e.Err {
|
||||
case cERROR_NONE_MAPPED:
|
||||
switch {
|
||||
case errors.Is(e.Err, windows.ERROR_INVALID_SID):
|
||||
s = "the security ID structure is invalid"
|
||||
case errors.Is(e.Err, windows.ERROR_NONE_MAPPED):
|
||||
s = "not found"
|
||||
default:
|
||||
s = e.Err.Error()
|
||||
@@ -37,6 +41,8 @@ func (e *AccountLookupError) Error() string {
|
||||
return "lookup account " + e.Name + ": " + s
|
||||
}
|
||||
|
||||
func (e *AccountLookupError) Unwrap() error { return e.Err }
|
||||
|
||||
type SddlConversionError struct {
|
||||
Sddl string
|
||||
Err error
|
||||
@@ -46,15 +52,19 @@ func (e *SddlConversionError) Error() string {
|
||||
return "convert " + e.Sddl + ": " + e.Err.Error()
|
||||
}
|
||||
|
||||
func (e *SddlConversionError) Unwrap() error { return e.Err }
|
||||
|
||||
// LookupSidByName looks up the SID of an account by name
|
||||
//
|
||||
//revive:disable-next-line:var-naming SID, not Sid
|
||||
func LookupSidByName(name string) (sid string, err error) {
|
||||
if name == "" {
|
||||
return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
|
||||
return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED}
|
||||
}
|
||||
|
||||
var sidSize, sidNameUse, refDomainSize uint32
|
||||
err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
|
||||
if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
|
||||
if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
|
||||
return "", &AccountLookupError{name, err}
|
||||
}
|
||||
sidBuffer := make([]byte, sidSize)
|
||||
@@ -73,6 +83,42 @@ func LookupSidByName(name string) (sid string, err error) {
|
||||
return sid, nil
|
||||
}
|
||||
|
||||
// LookupNameBySid looks up the name of an account by SID
|
||||
//
|
||||
//revive:disable-next-line:var-naming SID, not Sid
|
||||
func LookupNameBySid(sid string) (name string, err error) {
|
||||
if sid == "" {
|
||||
return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED}
|
||||
}
|
||||
|
||||
sidBuffer, err := windows.UTF16PtrFromString(sid)
|
||||
if err != nil {
|
||||
return "", &AccountLookupError{sid, err}
|
||||
}
|
||||
|
||||
var sidPtr *byte
|
||||
if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil {
|
||||
return "", &AccountLookupError{sid, err}
|
||||
}
|
||||
defer localFree(uintptr(unsafe.Pointer(sidPtr)))
|
||||
|
||||
var nameSize, refDomainSize, sidNameUse uint32
|
||||
err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse)
|
||||
if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
|
||||
return "", &AccountLookupError{sid, err}
|
||||
}
|
||||
|
||||
nameBuffer := make([]uint16, nameSize)
|
||||
refDomainBuffer := make([]uint16, refDomainSize)
|
||||
err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
|
||||
if err != nil {
|
||||
return "", &AccountLookupError{sid, err}
|
||||
}
|
||||
|
||||
name = windows.UTF16ToString(nameBuffer)
|
||||
return name, nil
|
||||
}
|
||||
|
||||
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
|
||||
var sdBuffer uintptr
|
||||
err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
|
||||
@@ -87,7 +133,7 @@ func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
|
||||
|
||||
func SecurityDescriptorToSddl(sd []byte) (string, error) {
|
||||
var sddl *uint16
|
||||
// The returned string length seems to including an aribtrary number of terminating NULs.
|
||||
// The returned string length seems to include an arbitrary number of terminating NULs.
|
||||
// Don't use it.
|
||||
err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
|
||||
if err != nil {
|
||||
|
||||
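The new LookupNameBySid mirrors the existing LookupSidByName; a short round-trip sketch (the account name is just an example):

//go:build windows

package main

import (
	"fmt"
	"log"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Resolve a well-known account name to its SID string...
	sid, err := winio.LookupSidByName("SYSTEM")
	if err != nil {
		log.Fatal(err)
	}

	// ...and map the SID back to an account name.
	name, err := winio.LookupNameBySid(sid)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s -> %s\n", sid, name)
}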
4  vendor/github.com/Microsoft/go-winio/syscall.go  generated  vendored
@@ -1,3 +1,5 @@
|
||||
//go:build windows
|
||||
|
||||
package winio
|
||||
|
||||
//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
|
||||
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go
|
||||
|
||||
5  vendor/github.com/Microsoft/go-winio/tools.go  generated  vendored  Normal file
@@ -0,0 +1,5 @@
|
||||
//go:build tools
|
||||
|
||||
package winio
|
||||
|
||||
import _ "golang.org/x/tools/cmd/stringer"
|
||||
59  vendor/github.com/Microsoft/go-winio/vhd/vhd.go  generated  vendored
@@ -11,7 +11,7 @@ import (
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go
|
||||
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zvhd_windows.go vhd.go
|
||||
|
||||
//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk
|
||||
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
|
||||
@@ -62,8 +62,8 @@ type OpenVirtualDiskParameters struct {
|
||||
Version2 OpenVersion2
|
||||
}
|
||||
|
||||
// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However,
|
||||
// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating
|
||||
// The higher level `OpenVersion2` struct uses `bool`s to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However,
|
||||
// the internal windows structure uses `BOOL`s aka int32s for these types. `openVersion2` is used for translating
|
||||
// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods.
|
||||
type openVersion2 struct {
|
||||
getInfoOnly int32
|
||||
@@ -87,9 +87,10 @@ type AttachVirtualDiskParameters struct {
|
||||
}
|
||||
|
||||
const (
|
||||
//revive:disable-next-line:var-naming ALL_CAPS
|
||||
VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3
|
||||
|
||||
// Access Mask for opening a VHD
|
||||
// Access Mask for opening a VHD.
|
||||
VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000
|
||||
VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000
|
||||
VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000
|
||||
@@ -101,7 +102,7 @@ const (
|
||||
VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000
|
||||
VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000
|
||||
|
||||
// Flags for creating a VHD
|
||||
// Flags for creating a VHD.
|
||||
CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0
|
||||
CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1
|
||||
CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2
|
||||
@@ -109,12 +110,12 @@ const (
|
||||
CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8
|
||||
CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10
|
||||
CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20
|
||||
CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40
|
||||
CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 //revive:disable-line:var-naming VHD, not Vhd
|
||||
CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80
|
||||
CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100
|
||||
CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 //revive:disable-line:var-naming PMEM, not Pmem
|
||||
CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200
|
||||
|
||||
// Flags for opening a VHD
|
||||
// Flags for opening a VHD.
|
||||
OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000
|
||||
OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001
|
||||
OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002
|
||||
@@ -127,7 +128,7 @@ const (
|
||||
OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100
|
||||
OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200
|
||||
|
||||
// Flags for attaching a VHD
|
||||
// Flags for attaching a VHD.
|
||||
AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000
|
||||
AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001
|
||||
AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002
|
||||
@@ -140,12 +141,14 @@ const (
|
||||
AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100
|
||||
AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200
|
||||
|
||||
// Flags for detaching a VHD
|
||||
// Flags for detaching a VHD.
|
||||
DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0
|
||||
)
|
||||
|
||||
// CreateVhdx is a helper function to create a simple vhdx file at the given path using
|
||||
// default values.
|
||||
//
|
||||
//revive:disable-next-line:var-naming VHDX, not Vhdx
|
||||
func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
|
||||
params := CreateVirtualDiskParameters{
|
||||
Version: 2,
|
||||
@@ -172,6 +175,8 @@ func DetachVirtualDisk(handle syscall.Handle) (err error) {
|
||||
}
|
||||
|
||||
// DetachVhd detaches a vhd found at `path`.
|
||||
//
|
||||
//revive:disable-next-line:var-naming VHD, not Vhd
|
||||
func DetachVhd(path string) error {
|
||||
handle, err := OpenVirtualDisk(
|
||||
path,
|
||||
@@ -181,12 +186,16 @@ func DetachVhd(path string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer syscall.CloseHandle(handle)
|
||||
defer syscall.CloseHandle(handle) //nolint:errcheck
|
||||
return DetachVirtualDisk(handle)
|
||||
}
|
||||
|
||||
// AttachVirtualDisk attaches a virtual hard disk for use.
|
||||
func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) {
|
||||
func AttachVirtualDisk(
|
||||
handle syscall.Handle,
|
||||
attachVirtualDiskFlag AttachVirtualDiskFlag,
|
||||
parameters *AttachVirtualDiskParameters,
|
||||
) (err error) {
|
||||
// Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5.
|
||||
if err := attachVirtualDisk(
|
||||
handle,
|
||||
@@ -203,6 +212,8 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua
|
||||
|
||||
// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2
|
||||
// of the ATTACH_VIRTUAL_DISK_PARAMETERS.
|
||||
//
|
||||
//revive:disable-next-line:var-naming VHD, not Vhd
|
||||
func AttachVhd(path string) (err error) {
|
||||
handle, err := OpenVirtualDisk(
|
||||
path,
|
||||
@@ -213,7 +224,7 @@ func AttachVhd(path string) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
defer syscall.CloseHandle(handle)
|
||||
defer syscall.CloseHandle(handle) //nolint:errcheck
|
||||
params := AttachVirtualDiskParameters{Version: 2}
|
||||
if err := AttachVirtualDisk(
|
||||
handle,
|
||||
@@ -226,7 +237,11 @@ func AttachVhd(path string) (err error) {
|
||||
}
|
||||
|
||||
// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags.
|
||||
func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) {
|
||||
func OpenVirtualDisk(
|
||||
vhdPath string,
|
||||
virtualDiskAccessMask VirtualDiskAccessMask,
|
||||
openVirtualDiskFlags VirtualDiskFlag,
|
||||
) (syscall.Handle, error) {
|
||||
parameters := OpenVirtualDiskParameters{Version: 2}
|
||||
handle, err := OpenVirtualDiskWithParameters(
|
||||
vhdPath,
|
||||
@@ -241,7 +256,12 @@ func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask
|
||||
}
|
||||
|
||||
// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters.
|
||||
func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) {
|
||||
func OpenVirtualDiskWithParameters(
|
||||
vhdPath string,
|
||||
virtualDiskAccessMask VirtualDiskAccessMask,
|
||||
openVirtualDiskFlags VirtualDiskFlag,
|
||||
parameters *OpenVirtualDiskParameters,
|
||||
) (syscall.Handle, error) {
|
||||
var (
|
||||
handle syscall.Handle
|
||||
defaultType VirtualStorageType
|
||||
@@ -279,7 +299,12 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual
|
||||
}
|
||||
|
||||
// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk.
|
||||
func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) {
|
||||
func CreateVirtualDisk(
|
||||
path string,
|
||||
virtualDiskAccessMask VirtualDiskAccessMask,
|
||||
createVirtualDiskFlags CreateVirtualDiskFlag,
|
||||
parameters *CreateVirtualDiskParameters,
|
||||
) (syscall.Handle, error) {
|
||||
var (
|
||||
handle syscall.Handle
|
||||
defaultType VirtualStorageType
|
||||
@@ -323,6 +348,8 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) {
|
||||
}
|
||||
|
||||
// CreateDiffVhd is a helper function to create a differencing virtual disk.
|
||||
//
|
||||
//revive:disable-next-line:var-naming VHD, not Vhd
|
||||
func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error {
|
||||
// Setting `ParentPath` is how to signal to create a differencing disk.
|
||||
createParams := &CreateVirtualDiskParameters{
|
||||
|
||||
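A hedged end-to-end sketch of the exported VHD helpers touched in this file; the path and sizes are arbitrary.

//go:build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio/vhd"
)

func main() {
	const path = `C:\images\scratch.vhdx` // hypothetical location

	// Create a 1 GB VHDX with a 1 MB block size using the default parameters.
	if err := vhd.CreateVhdx(path, 1, 1); err != nil {
		log.Fatal(err)
	}

	// Attach it (version 2 attach parameters under the hood), then detach when done.
	if err := vhd.AttachVhd(path); err != nil {
		log.Fatal(err)
	}
	if err := vhd.DetachVhd(path); err != nil {
		log.Fatal(err)
	}
}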
4  vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go  generated  vendored
@@ -1,4 +1,6 @@
|
||||
// Code generated by 'go generate'; DO NOT EDIT.
|
||||
//go:build windows
|
||||
|
||||
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
|
||||
|
||||
package vhd
|
||||
|
||||
|
||||
45  vendor/github.com/Microsoft/go-winio/zsyscall_windows.go  generated  vendored
@@ -1,4 +1,6 @@
|
||||
// Code generated by 'go generate'; DO NOT EDIT.
|
||||
//go:build windows
|
||||
|
||||
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
|
||||
|
||||
package winio
|
||||
|
||||
@@ -47,9 +49,11 @@ var (
|
||||
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
|
||||
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
|
||||
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
|
||||
procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW")
|
||||
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
|
||||
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
|
||||
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
|
||||
procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW")
|
||||
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
|
||||
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
|
||||
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
|
||||
@@ -74,7 +78,6 @@ var (
|
||||
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
|
||||
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
|
||||
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
|
||||
procbind = modws2_32.NewProc("bind")
|
||||
)
|
||||
|
||||
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
|
||||
@@ -123,6 +126,14 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision
|
||||
return
|
||||
}
|
||||
|
||||
func convertStringSidToSid(str *uint16, sid **byte) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
|
||||
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
|
||||
len = uint32(r0)
|
||||
@@ -154,6 +165,14 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS
|
||||
return
|
||||
}
|
||||
|
||||
func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(systemName)
|
||||
@@ -380,25 +399,25 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
|
||||
return
|
||||
}
|
||||
|
||||
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
|
||||
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) {
|
||||
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
|
||||
status = ntstatus(r0)
|
||||
status = ntStatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
|
||||
func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) {
|
||||
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
|
||||
status = ntstatus(r0)
|
||||
status = ntStatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
|
||||
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) {
|
||||
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
|
||||
status = ntstatus(r0)
|
||||
status = ntStatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
|
||||
func rtlNtStatusToDosError(status ntStatus) (winerr error) {
|
||||
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
|
||||
if r0 != 0 {
|
||||
winerr = syscall.Errno(r0)
|
||||
@@ -417,11 +436,3 @@ func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
|
||||
if r1 == socketError {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
6  vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go  generated  vendored
@@ -86,6 +86,12 @@ type Container interface {
|
||||
// container to be terminated by some error condition (including calling
|
||||
// Close).
|
||||
Wait() error
|
||||
// WaitChannel returns the wait channel of the container
|
||||
WaitChannel() <-chan struct{}
|
||||
// WaitError returns the container termination error.
|
||||
// This function should only be called after the channel in WaitChannel()
|
||||
// is closed. Otherwise it is not thread safe.
|
||||
WaitError() error
|
||||
// Modify sends a request to modify container resources
|
||||
Modify(ctx context.Context, config interface{}) error
|
||||
}
|
||||
|
||||
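WaitChannel and WaitError split the blocking Wait into a select-friendly channel plus an error accessor that is only valid once that channel is closed. A sketch of the intended call pattern, assuming code that lives inside the hcsshim module (the package is internal) and an arbitrary timeout:

package cowutil

import (
	"fmt"
	"time"

	"github.com/Microsoft/hcsshim/internal/cow"
)

// waitWithTimeout waits for the container to exit, but gives up after d.
func waitWithTimeout(c cow.Container, d time.Duration) error {
	select {
	case <-c.WaitChannel():
		// The wait channel is closed, so WaitError is now safe to read.
		return c.WaitError()
	case <-time.After(d):
		return fmt.Errorf("container did not exit within %s", d)
	}
}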
6  vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go  generated  vendored
@@ -154,7 +154,7 @@ func (e *HcsError) Error() string {
|
||||
|
||||
func (e *HcsError) Temporary() bool {
|
||||
err, ok := e.Err.(net.Error)
|
||||
return ok && err.Temporary()
|
||||
return ok && err.Temporary() //nolint:staticcheck
|
||||
}
|
||||
|
||||
func (e *HcsError) Timeout() bool {
|
||||
@@ -193,7 +193,7 @@ func (e *SystemError) Error() string {
|
||||
|
||||
func (e *SystemError) Temporary() bool {
|
||||
err, ok := e.Err.(net.Error)
|
||||
return ok && err.Temporary()
|
||||
return ok && err.Temporary() //nolint:staticcheck
|
||||
}
|
||||
|
||||
func (e *SystemError) Timeout() bool {
|
||||
@@ -224,7 +224,7 @@ func (e *ProcessError) Error() string {
|
||||
|
||||
func (e *ProcessError) Temporary() bool {
|
||||
err, ok := e.Err.(net.Error)
|
||||
return ok && err.Temporary()
|
||||
return ok && err.Temporary() //nolint:staticcheck
|
||||
}
|
||||
|
||||
func (e *ProcessError) Timeout() bool {
|
||||
|
||||
200  vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go  generated  vendored
@@ -4,17 +4,22 @@ import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim/internal/cow"
	"github.com/Microsoft/hcsshim/internal/hcs/schema1"
	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
	"github.com/Microsoft/hcsshim/internal/jobobject"
	"github.com/Microsoft/hcsshim/internal/log"
	"github.com/Microsoft/hcsshim/internal/logfields"
	"github.com/Microsoft/hcsshim/internal/oc"
	"github.com/Microsoft/hcsshim/internal/timeout"
	"github.com/Microsoft/hcsshim/internal/vmcompute"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

@@ -28,7 +33,8 @@ type System struct {
	waitBlock chan struct{}
	waitError error
	exitError error
-	os, typ string
+	os, typ, owner string
+	startTime time.Time
}

func newSystem(id string) *System {

@@ -38,6 +44,11 @@ func newSystem(id string) *System {
	}
}

+// Implementation detail for silo naming, this should NOT be relied upon very heavily.
+func siloNameFmt(containerID string) string {
+	return fmt.Sprintf(`\Container_%s`, containerID)
+}

// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) {
	operation := "hcs::CreateComputeSystem"

@@ -127,6 +138,7 @@ func (computeSystem *System) getCachedProperties(ctx context.Context) error {
	}
	computeSystem.typ = strings.ToLower(props.SystemType)
	computeSystem.os = strings.ToLower(props.RuntimeOSType)
+	computeSystem.owner = strings.ToLower(props.Owner)
	if computeSystem.os == "" && computeSystem.typ == "container" {
		// Pre-RS5 HCS did not return the OS, but it only supported containers
		// that ran Windows.

@@ -195,7 +207,7 @@ func (computeSystem *System) Start(ctx context.Context) (err error) {
	if err != nil {
		return makeSystemError(computeSystem, operation, err, events)
	}

+	computeSystem.startTime = time.Now()
	return nil
}
@@ -275,11 +287,19 @@ func (computeSystem *System) waitBackground() {
	oc.SetSpanStatus(span, err)
}

+func (computeSystem *System) WaitChannel() <-chan struct{} {
+	return computeSystem.waitBlock
+}
+
+func (computeSystem *System) WaitError() error {
+	return computeSystem.waitError
+}

// Wait synchronously waits for the compute system to shutdown or terminate. If
// the compute system has already exited returns the previous error (if any).
func (computeSystem *System) Wait() error {
-	<-computeSystem.waitBlock
-	return computeSystem.waitError
+	<-computeSystem.WaitChannel()
+	return computeSystem.WaitError()
}

// ExitError returns an error describing the reason the compute system terminated.
@@ -324,11 +344,115 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr
	return properties, nil
}

-// PropertiesV2 returns the requested container properties targeting a V2 schema container.
-func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) {
-	computeSystem.handleLock.RLock()
-	defer computeSystem.handleLock.RUnlock()
+// queryInProc handles querying for container properties without reaching out to HCS. `props`
+// will be updated to contain any data returned from the queries present in `types`. If any properties
+// failed to be queried they will be tallied up and returned in as the first return value. Failures on
+// query are NOT considered errors; the only failure case for this method is if the containers job object
+// cannot be opened.
+func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) {
+	// In the future we can make use of some new functionality in the HCS that allows you
+	// to pass a job object for HCS to use for the container. Currently, the only way we'll
+	// be able to open the job/silo is if we're running as SYSTEM.
+	jobOptions := &jobobject.Options{
+		UseNTVariant: true,
+		Name:         siloNameFmt(computeSystem.id),
+	}
+	job, err := jobobject.Open(ctx, jobOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer job.Close()
+
+	var fallbackQueryTypes []hcsschema.PropertyType
+	for _, propType := range types {
+		switch propType {
+		case hcsschema.PTStatistics:
+			// Handle a bad caller asking for the same type twice. No use in re-querying if this is
+			// filled in already.
+			if props.Statistics == nil {
+				props.Statistics, err = computeSystem.statisticsInProc(job)
+				if err != nil {
+					log.G(ctx).WithError(err).Warn("failed to get statistics in-proc")
+
+					fallbackQueryTypes = append(fallbackQueryTypes, propType)
+				}
+			}
+		default:
+			fallbackQueryTypes = append(fallbackQueryTypes, propType)
+		}
+	}
+
+	return fallbackQueryTypes, nil
+}
+
+// statisticsInProc emulates what HCS does to grab statistics for a given container with a small
+// change to make grabbing the private working set total much more efficient.
+func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) {
+	// Start timestamp for these stats before we grab them to match HCS
+	timestamp := time.Now()
+
+	memInfo, err := job.QueryMemoryStats()
+	if err != nil {
+		return nil, err
+	}
+
+	processorInfo, err := job.QueryProcessorStats()
+	if err != nil {
+		return nil, err
+	}
+
+	storageInfo, err := job.QueryStorageStats()
+	if err != nil {
+		return nil, err
+	}
+
+	// This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation
+	// with the class SystemProcessInformation which returns an array containing system information for *every*
+	// process running on the machine. They then grab the pids that are running in the container and filter down
+	// the entries in the array to only what's running in that silo and start tallying up the total. This doesn't
+	// work well as performance should get worse if more processess are running on the machine in general and not
+	// just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored
+	// as well which isn't great and is wasted work to fetch.
+	//
+	// HCS only let's you grab statistics in an all or nothing fashion, so we can't just grab the private
+	// working set ourselves and ask for everything else seperately. The optimization we can make here is
+	// to open the silo ourselves and do the same queries for the rest of the info, as well as calculating
+	// the private working set in a more efficient manner by:
+	//
+	// 1. Find the pids running in the silo
+	// 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access)
+	// 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters
+	// 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2.
+	privateWorkingSet, err := job.QueryPrivateWorkingSet()
+	if err != nil {
+		return nil, err
+	}
+
+	return &hcsschema.Statistics{
+		Timestamp:          timestamp,
+		ContainerStartTime: computeSystem.startTime,
+		Uptime100ns:        uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100,
+		Memory: &hcsschema.MemoryStats{
+			MemoryUsageCommitBytes:            memInfo.JobMemory,
+			MemoryUsageCommitPeakBytes:        memInfo.PeakJobMemoryUsed,
+			MemoryUsagePrivateWorkingSetBytes: privateWorkingSet,
+		},
+		Processor: &hcsschema.ProcessorStats{
+			RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime),
+			RuntimeUser100ns:   uint64(processorInfo.TotalUserTime),
+			TotalRuntime100ns:  uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime),
+		},
+		Storage: &hcsschema.StorageStats{
+			ReadCountNormalized:  uint64(storageInfo.ReadStats.IoCount),
+			ReadSizeBytes:        storageInfo.ReadStats.TotalSize,
+			WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount),
+			WriteSizeBytes:       storageInfo.WriteStats.TotalSize,
+		},
+	}, nil
+}
+
+// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types.
+func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) {
	operation := "hcs::System::PropertiesV2"

	queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types})
@@ -345,12 +469,66 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem
	if propertiesJSON == "" {
		return nil, ErrUnexpectedValue
	}
-	properties := &hcsschema.Properties{}
-	if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
+	props := &hcsschema.Properties{}
+	if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil {
		return nil, makeSystemError(computeSystem, operation, err, nil)
	}

-	return properties, nil
+	return props, nil
}

+// PropertiesV2 returns the requested compute systems properties targeting a V2 schema compute system.
+func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) {
+	computeSystem.handleLock.RLock()
+	defer computeSystem.handleLock.RUnlock()
+
+	// Let HCS tally up the total for VM based queries instead of querying ourselves.
+	if computeSystem.typ != "container" {
+		return computeSystem.hcsPropertiesV2Query(ctx, types)
+	}
+
+	// Define a starter Properties struct with the default fields returned from every
+	// query. Owner is only returned from Statistics but it's harmless to include.
+	properties := &hcsschema.Properties{
+		Id:            computeSystem.id,
+		SystemType:    computeSystem.typ,
+		RuntimeOsType: computeSystem.os,
+		Owner:         computeSystem.owner,
+	}
+
+	logEntry := log.G(ctx)
+	// First lets try and query ourselves without reaching to HCS. If any of the queries fail
+	// we'll take note and fallback to querying HCS for any of the failed types.
+	fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types)
+	if err == nil && len(fallbackTypes) == 0 {
+		return properties, nil
+	} else if err != nil {
+		logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err))
+		fallbackTypes = types
+	}
+
+	logEntry.WithFields(logrus.Fields{
+		logfields.ContainerID: computeSystem.id,
+		"propertyTypes":       fallbackTypes,
+	}).Info("falling back to HCS for property type queries")
+
+	hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes)
+	if err != nil {
+		return nil, err
+	}
+
+	// Now add in anything that we might have successfully queried in process.
+	if properties.Statistics != nil {
+		hcsProperties.Statistics = properties.Statistics
+		hcsProperties.Owner = properties.Owner
+	}
+
+	// For future support for querying processlist in-proc as well.
+	if properties.ProcessList != nil {
+		hcsProperties.ProcessList = properties.ProcessList
+	}
+
+	return hcsProperties, nil
+}

// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
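The new PropertiesV2 above follows a try-in-proc-first pattern: it queries what it can from the container's job object directly, records which property types could not be answered locally, then asks HCS only for those remaining types and merges the results. Below is a self-contained Go sketch of that general pattern; every type and function in it (propertyType, props, queryLocal, queryRemote, getProperties) is invented for illustration and stands in for hcsschema.PropertyType, hcsschema.Properties, queryInProc, and hcsPropertiesV2Query; none of it is hcsshim API.

```go
package main

import (
	"errors"
	"fmt"
)

// propertyType and props are invented stand-ins for the schema types used in the diff above.
type propertyType string

type props map[propertyType]string

// queryLocal answers what it can cheaply and returns the types it could not handle,
// mirroring how queryInProc returns the property types that need an HCS fallback.
func queryLocal(out props, types []propertyType) ([]propertyType, error) {
	var fallback []propertyType
	for _, t := range types {
		if t == "statistics" {
			out[t] = "gathered in-proc"
		} else {
			fallback = append(fallback, t)
		}
	}
	return fallback, nil
}

// queryRemote stands in for the more expensive service round trip (the HCS query).
func queryRemote(out props, types []propertyType) error {
	if len(types) == 0 {
		return errors.New("nothing to query")
	}
	for _, t := range types {
		out[t] = "gathered from service"
	}
	return nil
}

// getProperties tries the local path first and falls back to the service only
// for the types the local path could not satisfy.
func getProperties(types []propertyType) (props, error) {
	out := props{}
	fallback, err := queryLocal(out, types)
	if err != nil {
		// Local querying failed entirely; ask the service for everything.
		fallback = types
	}
	if len(fallback) == 0 {
		return out, nil
	}
	if err := queryRemote(out, fallback); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	got, err := getProperties([]propertyType{"statistics", "processlist"})
	fmt.Println(got, err)
}
```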
Some files were not shown because too many files have changed in this diff.