Mirror of https://github.com/containers/skopeo.git (synced 2026-01-30 05:49:52 +00:00)

Compare commits: v1.2.0 ... release-1. (241 commits)
| SHA1 |
|---|
| 723f050465 |
| 05e6fa25ed |
| a0f7bcde83 |
| 8b694b2649 |
| 069208bedc |
| 623d90afbb |
| 0583c8e641 |
| 76b7472044 |
| aab0d6da59 |
| e44bf704dd |
| e02b7b21b3 |
| 2c19d55c3f |
| f110fb7969 |
| 2091af3db1 |
| 32aa9fe647 |
| ffee6f7219 |
| 3761ea6714 |
| 8e25f6bbeb |
| 59ceb6a5ea |
| 78cb493f95 |
| c5dcc3b7d5 |
| 038f70e6f5 |
| 53b9ccff29 |
| e9207c4564 |
| f21d940c6e |
| c35944bec0 |
| 266dc3dc9a |
| 91d9ccf5e5 |
| 4e57679c9a |
| 68f188ae77 |
| 0faf160170 |
| 69decaeb1d |
| 001775e994 |
| fc448c2253 |
| b10d3e43a4 |
| a32be320cb |
| 5e13a55444 |
| c0d259712c |
| 70abdf7334 |
| f232ae499b |
| aba84840dc |
| e536c4da34 |
| a1a8692457 |
| 5a594bff65 |
| 2eb35e7af9 |
| 00490a2cbb |
| 9a10ee2f1f |
| 002b2e4db9 |
| 891d9750a3 |
| d6912022b5 |
| eab7c4b0d1 |
| 7898ffaf23 |
| ce4304a0ad |
| 610c612129 |
| ad9f1d7bb9 |
| 37f15d6d11 |
| 2d3f3ed901 |
| 65d3890ea1 |
| 87f36844c3 |
| a81cd74734 |
| 5a3e8b6150 |
| 88979a6a88 |
| 146af8cd59 |
| 6b95125757 |
| 6ee20f9d2a |
| c84fc7d243 |
| 060fe4b47f |
| 3a759d5136 |
| f15564f705 |
| 85e0fde20e |
| fe1cd126f6 |
| 7a74faf4c1 |
| dbe6764b35 |
| 5485daff13 |
| cfbabac961 |
| 5907b4ef08 |
| c456cef9bd |
| 0196219924 |
| e945435dea |
| c5103c6b51 |
| 61722a8a70 |
| cc3ddf4804 |
| d9f4377831 |
| 0717014e46 |
| 80dcddef36 |
| 6b41287cbf |
| bef5e4505e |
| f5a028e4d9 |
| 3d1d2978d7 |
| 035eb33f1f |
| 6cbb0c4c88 |
| 663fe44f27 |
| cc24482985 |
| b7bf15bc8b |
| 61b62f9e93 |
| 2c8655e251 |
| 94d588c480 |
| a85e3beccf |
| 3878a37660 |
| be600975a9 |
| 15f0d5cd2f |
| 6fa634227c |
| e224b78efc |
| 1c4b0fc33d |
| 81e66ffc46 |
| 5995ceedf9 |
| 7a9d638989 |
| 40f5a8cf69 |
| a6e50d32d2 |
| 9a88c3986d |
| ac5241482c |
| aff1b6215b |
| e0ba05af59 |
| 55b9782058 |
| 4ab7faa800 |
| c51c7b4e4d |
| 3375a905cc |
| f3c8d26cd8 |
| e1dc30b6e1 |
| a9e9bdc534 |
| 6c8b8c20f5 |
| 0e1ee196bd |
| 77a2e08eb2 |
| a3c21f25c9 |
| 1e1952693a |
| efc0170ee8 |
| 0d0a97eb00 |
| 47a6716921 |
| 18e6c6f17b |
| ef6f46a3b5 |
| 31562124a3 |
| b544c1be3a |
| 0c0a17b641 |
| 2e90a8af5a |
| 2294113c78 |
| bdb117ded6 |
| beadcbb17d |
| fe57e80c18 |
| ac07bf278a |
| 3c33cb4556 |
| f94d85aa8e |
| b0da05656d |
| 9828f21007 |
| 6ee4b2dc84 |
| b3a15e7288 |
| f771cb0d39 |
| c4fb93647a |
| 81535c5244 |
| 7442052875 |
| 84232cf306 |
| c339a1abe9 |
| 766927d1d4 |
| fc78c93ad2 |
| 4987a67293 |
| 131b2b8c63 |
| 342b8398e2 |
| 6b260e1686 |
| 6294875a04 |
| 8cc9fcae6f |
| 4769dd0689 |
| 0fb1121f36 |
| 4aaa9b401d |
| 44087c4866 |
| f36f7dbfdf |
| 07c0e6a50f |
| ed321809d3 |
| 13ef91744c |
| 5b8fe7ffa5 |
| 8cd57ef8de |
| 1b813f805b |
| f3a8a7360d |
| 42e9121eba |
| 4597c09522 |
| 2ec251c2e2 |
| e717a59174 |
| c88576b2fc |
| 901f7e9c47 |
| 0f4dc80c99 |
| 353f3a23e1 |
| 4b4ad6285e |
| 6b007c70c7 |
| 6a48870594 |
| 5d73dea577 |
| 82e461ff9d |
| e30abff31b |
| 7fee9122fb |
| 2342171cdf |
| 58c9eccffd |
| 23fa1666dd |
| fa2e385713 |
| 958c361c97 |
| 72e8af59aa |
| 873fbee01b |
| 1a3eb478a7 |
| bc0ecfc8f6 |
| 4ad2c75b52 |
| 9662633059 |
| 18fe2fd00a |
| 19f9a6adc2 |
| 11b4fd3956 |
| 8d2c20f160 |
| 6d7d0e7d39 |
| 39f8117c27 |
| e709329b03 |
| 1a3ae1411e |
| 35daba1194 |
| 65c5b0bf8d |
| cd884fa529 |
| 3a72464068 |
| 32e242586c |
| a7f4b26f90 |
| 9bcae7060a |
| 98fdb042a1 |
| ceaee440a6 |
| 6eb4fb64a0 |
| a75daba386 |
| 67d72d27c9 |
| 362f70b056 |
| 035e25496a |
| 10da9f7012 |
| c18a977e96 |
| 0954077fd7 |
| bde39ce91d |
| a422316d48 |
| 21aa04e3c3 |
| 4cc72b9f69 |
| 50ff352e41 |
| 027d7e466a |
| 69f51ac183 |
| d8bc8b62e9 |
| f9773889a1 |
| 6dabefa9db |
| 5364f84119 |
| 4ba7d50174 |
| 12729c4d7e |
| 44beab63c9 |
| 669627d1b6 |
| 1c45df1e03 |
| f91a9c569d |
| 248a1dd01a |
| 3a75b51b59 |
150  .cirrus.yml  Normal file

@@ -0,0 +1,150 @@
---

# Main collection of env. vars to set for all tasks and scripts.
env:
    ####
    #### Global variables used for all tasks
    ####
    # Name of the ultimate destination branch for this CI run, PR or post-merge.
    DEST_BRANCH: "main"
    # Overrides default location (/tmp/cirrus) for repo clone
    GOPATH: &gopath "/var/tmp/go"
    GOBIN: "${GOPATH}/bin"
    GOCACHE: "${GOPATH}/cache"
    GOSRC: &gosrc "/var/tmp/go/src/github.com/containers/skopeo"
    # Required for consistency with containers/image CI
    SKOPEO_PATH: *gosrc
    CIRRUS_WORKING_DIR: *gosrc
    # The default is 'sh' if unspecified
    CIRRUS_SHELL: "/bin/bash"
    # Save a little typing (path relative to $CIRRUS_WORKING_DIR)
    SCRIPT_BASE: "./contrib/cirrus"

    ####
    #### Cache-image names to test with (double-quotes around names are critical)
    ####
    FEDORA_NAME: "fedora-34"
    PRIOR_FEDORA_NAME: "fedora-33"
    UBUNTU_NAME: "ubuntu-2104"
    PRIOR_UBUNTU_NAME: "ubuntu-2010"

    # Google-cloud VM Images
    IMAGE_SUFFIX: "c6032583541653504"
    FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
    PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
    PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${IMAGE_SUFFIX}"

    # Container FQIN's
    FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
    PRIOR_FEDORA_CONTAINER_FQIN: "quay.io/libpod/prior-fedora_podman:${IMAGE_SUFFIX}"
    UBUNTU_CONTAINER_FQIN: "quay.io/libpod/ubuntu_podman:${IMAGE_SUFFIX}"
    PRIOR_UBUNTU_CONTAINER_FQIN: "quay.io/libpod/prior-ubuntu_podman:${IMAGE_SUFFIX}"

    # Equivalent to image produced by 'make build-container'
    SKOPEO_CI_CONTAINER_FQIN: "quay.io/skopeo/ci:${DEST_BRANCH}"


# Default timeout for each task
timeout_in: 45m


gcp_credentials: ENCRYPTED[52d9e807b531b37ab14e958cb5a72499460663f04c8d73e22ad608c027a31118420f1c80f0be0882fbdf96f49d8f9ac0]


validate_task:
    # The git-validation tool doesn't work well on branch or tag push,
    # under Cirrus-CI, due to challenges obtaining the starting commit ID.
    # Only do validation for PRs.
    only_if: $CIRRUS_PR != ''
    container: &build_container
        image: "${SKOPEO_CI_CONTAINER_FQIN}"
        cpu: 4
        memory: 8
    script: make validate-local


#####
##### NOTE: This task is substantially duplicated in the containers/image
##### repository's `.cirrus.yml`. Changes made here should be fully merged
##### prior to being manually duplicated and maintained in containers/image.
#####
test_skopeo_task:
    alias: test_skopeo
    depends_on:
        - validate
    gce_instance:
        image_project: libpod-218412
        zone: "us-central1-f"
        cpu: 2
        memory: "4Gb"
        # Required to be 200gig, do not modify - has i/o performance impact
        # according to gcloud CLI tool warning messages.
        disk: 200
        image_name: ${FEDORA_CACHE_IMAGE_NAME}
    matrix:
        - name: "Skopeo Test"
          env:
              BUILDTAGS: 'btrfs_noversion libdm_no_deferred_remove'
        - name: "Skopeo Test w/ opengpg"
          env:
              BUILDTAGS: 'btrfs_noversion libdm_no_deferred_remove containers_image_openpgp'
    setup_script: >-
        "${GOSRC}/${SCRIPT_BASE}/runner.sh" setup
    vendor_script: >-
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" vendor
    build_script: >-
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
    validate_script: >-
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" validate
    unit_script: >-
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" unit
    integration_script: >-
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" integration
    system_script: >
        "${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" system


# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
meta_task:
    name: "VM img. keepalive"
    alias: meta
    container: &smallcontainer
        cpu: 2
        memory: 2
        image: quay.io/libpod/imgts:latest
    env:
        # Space-separated list of images used by this repository state
        IMGNAMES: >-
            ${FEDORA_CACHE_IMAGE_NAME}
            ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
            ${UBUNTU_CACHE_IMAGE_NAME}
            ${PRIOR_UBUNTU_CACHE_IMAGE_NAME}
        BUILDID: "${CIRRUS_BUILD_ID}"
        REPOREF: "${CIRRUS_REPO_NAME}"
        GCPJSON: ENCRYPTED[6867b5a83e960e7c159a98fe6c8360064567a071c6f4b5e7d532283ecd870aa65c94ccd74bdaa9bf7aadac9d42e20a67]
        GCPNAME: ENCRYPTED[1cf558ae125e3c39ec401e443ad76452b25d790c45eb73d77c83eb059a0f7fd5085ef7e2f7e410b04ea6e83b0aab2eb1]
        GCPPROJECT: libpod-218412
    clone_script: &noop mkdir -p "$CIRRUS_WORKING_DIR"
    script: /usr/local/bin/entrypoint.sh


# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
# of this task.
success_task:
    name: "Total Success"
    alias: success
    # N/B: ALL tasks must be listed here, minus their '_task' suffix.
    depends_on:
        - validate
        - test_skopeo
        - meta
    container: *smallcontainer
    env:
        CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
        TEST_ENVIRON: container
    clone_script: *noop
    script: /bin/true
10  .github/dependabot.yml  vendored  Normal file

@@ -0,0 +1,10 @@
version: 2
updates:
  - package-ecosystem: gomod
    directory: "/"
    schedule:
      interval: daily
      time: "10:00"
      timezone: Europe/Berlin
    open-pull-requests-limit: 10
25  .github/workflow/stale.yml  vendored  Normal file

@@ -0,0 +1,25 @@
name: Mark stale issues and pull requests

# Please refer to https://github.com/actions/stale/blob/master/action.yml
# to see all config knobs of the stale action.

on:
  schedule:
  - cron: "0 0 * * *"

jobs:
  stale:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/stale@v1
      with:
        repo-token: ${{ secrets.GITHUB_TOKEN }}
        stale-issue-message: 'A friendly reminder that this issue had no activity for 30 days.'
        stale-pr-message: 'A friendly reminder that this PR had no activity for 30 days.'
        stale-issue-label: 'stale-issue'
        stale-pr-label: 'stale-pr'
        days-before-stale: 30
        days-before-close: 365
        remove-stale-when-updated: true
3  .gitignore  vendored

@@ -5,3 +5,6 @@ result

# ignore JetBrains IDEs (GoLand) config folder
.idea

# Ignore the bin directory
bin
86  .travis.yml

@@ -1,27 +1,75 @@
language: go

matrix:
  include:
  - os: linux
    sudo: required
    services:
      - docker
  - os: osx

go:
  - 1.13.x
  - 1.15.x

notifications:
  email: false

install:
  # Ideally, the (brew update) should not be necessary and Travis would have fairly
  # frequently updated OS images; that’s not been the case historically.
  # In particular, explicitly unlink python@2, which has been removed from Homebrew
  # since the last OS image build (as of July 2020), but the Travis OS still
  # contains it, and it prevents updating of Python 3.
  - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update && brew unlink python@2 && brew install gpgme ; fi
env:
  global:
    # Multiarch manifest will support architectures from this list. It should be the same architectures, as ones in image-build-push step in this Travis config
    - MULTIARCH_MANIFEST_ARCHITECTURES="amd64 s390x ppc64le"
    # env variables for stable image build
    - STABLE_IMAGE=quay.io/skopeo/stable:v1.2.0
    - EXTRA_STABLE_IMAGE=quay.io/containers/skopeo:v1.2.0
    # env variable for upstream image build
    - UPSTREAM_IMAGE=quay.io/skopeo/upstream:master

script:
  - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then hack/travis_osx.sh ; fi
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make vendor && ./hack/tree_status.sh && make check ; fi
# Just declaration of the image-build-push step with script actions to execute
x_base_steps:
  - &image-build-push
    services:
      - docker
    os: linux
    dist: focal
    script:
      # skopeo upstream image build
      - make -f release/Makefile build-image/upstream
      # Push image in case if build is started via cron job or code is pushed to master
      - if [ "$TRAVIS_EVENT_TYPE" == "push" ] || [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then
          make -f release/Makefile push-image/upstream ;
        fi

      # skopeo stable image build
      - make -f release/Makefile build-image/stable
      # Push image in case if build is started via cron job or code is pushed to master
      - if [ "$TRAVIS_EVENT_TYPE" == "push" ] || [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then
          make -f release/Makefile push-image/stable ;
        fi

# Just declaration of stage order to run
stages:
  # Build and push image for 1 architecture
  - name: image-build-push
    if: branch = master
  # Create and push image manifest to have multiarch image on top of architecture specific images
  - name: manifest-multiarch-push
    if: (type IN (push, cron)) AND branch = master

# Actual execution of steps
jobs:
  include:
    # Run 3 image-build-push tasks in parallel for linux/amd64, linux/s390x and linux/ppc64le platforms (for upstream and stable)
    - stage: image-build-push
      <<: *image-build-push
      name: images for amd64
      arch: amd64

    - stage: image-build-push
      <<: *image-build-push
      name: images for s390x
      arch: s390x

    - stage: image-build-push
      <<: *image-build-push
      name: images for ppc64le
      arch: ppc64le

    # Run final task to generate multi-arch image manifests (for upstream and stable)
    - stage: manifest-multiarch-push
      os: linux
      dist: focal
      script:
        - make -f release/Makefile push-manifest-multiarch/upstream
        - make -f release/Makefile push-manifest-multiarch/stable
@@ -142,7 +142,7 @@ When new PRs for [containers/image](https://github.com/containers/image) break `
- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
- run `make vendor`
- update the skopeo PR with the result, drop the “DO NOT MERGE” marking
- after tests complete succcesfully again, merge the skopeo PR
- after tests complete successfully again, merge the skopeo PR

## Communications
@@ -13,6 +13,7 @@ RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-md2ma
    which tar wget hostname util-linux bsdtar socat ethtool device-mapper iptables tree findutils nmap-ncat e2fsprogs xfsprogs lsof docker iproute \
    bats jq podman runc \
    golint \
    openssl \
    && dnf clean all

# Install two versions of the registry. The first is an older version that
@@ -20,6 +21,7 @@ RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-md2ma
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
RUN set -x \
    && export GO111MODULE=off \
    && REGISTRY_COMMIT_SCHEMA1=ec87e9b6971d831f0eff752ddb54fb64693e51cd \
    && REGISTRY_COMMIT=47a064d4195a9b56133891bbb13620c3ac83a827 \
    && export GOPATH="$(mktemp -d)" \
@@ -33,6 +35,7 @@ RUN set -x \
    && rm -rf "$GOPATH"

RUN set -x \
    && export GO111MODULE=off \
    && export GOPATH=$(mktemp -d) \
    && git clone --depth 1 -b v1.5.0-alpha.3 git://github.com/openshift/origin "$GOPATH/src/github.com/openshift/origin" \
    # The sed edits out a "go < 1.5" check which works incorrectly with go ≥ 1.10. \
@@ -45,6 +48,7 @@ RUN set -x \

ENV GOPATH /usr/share/gocode:/go
ENV PATH $GOPATH/bin:/usr/share/gocode/bin:$PATH
ENV container_magic 85531765-346b-4316-bdb8-358e4cca9e5d
RUN go version
WORKDIR /go/src/github.com/containers/skopeo
COPY . /go/src/github.com/containers/skopeo
@@ -1,9 +1,12 @@
FROM golang:1.14-buster
FROM registry.fedoraproject.org/fedora:33

RUN apt-get update && \
    apt-get install -y \
        libdevmapper-dev \
        libgpgme11-dev
RUN dnf update -y && \
    dnf install -y \
        btrfs-progs-devel \
        device-mapper-devel \
        golang \
        gpgme-devel \
        make

ENV GOPATH=/
WORKDIR /src/github.com/containers/skopeo
83  Makefile

@@ -2,26 +2,23 @@

export GOPROXY=https://proxy.golang.org

ifeq ($(shell uname),Darwin)
PREFIX ?= ${DESTDIR}/usr/local
DARWIN_BUILD_TAG=
# On macOS, (brew install gpgme) installs it within /usr/local, but /usr/local/include is not in the default search path.
# Rather than hard-code this directory, use gpgme-config. Sadly that must be done at the top-level user
# instead of locally in the gpgme subpackage, because cgo supports only pkg-config, not general shell scripts,
# and gpgme does not install a pkg-config file.
# On some platforms (eg. macOS, FreeBSD) gpgme is installed in /usr/local/ but /usr/local/include/ is
# not in the default search path. Rather than hard-code this directory, use gpgme-config.
# Sadly that must be done at the top-level user instead of locally in the gpgme subpackage, because cgo
# supports only pkg-config, not general shell scripts, and gpgme does not install a pkg-config file.
# If gpgme is not installed or gpgme-config can’t be found for other reasons, the error is silently ignored
# (and the user will probably find out because the cgo compilation will fail).
GPGME_ENV := CGO_CFLAGS="$(shell gpgme-config --cflags 2>/dev/null)" CGO_LDFLAGS="$(shell gpgme-config --libs 2>/dev/null)"
else
PREFIX ?= ${DESTDIR}/usr
endif

INSTALLDIR=${PREFIX}/bin
MANINSTALLDIR=${PREFIX}/share/man
CONTAINERSSYSCONFIGDIR=${DESTDIR}/etc/containers
REGISTRIESDDIR=${CONTAINERSSYSCONFIGDIR}/registries.d
SIGSTOREDIR=${DESTDIR}/var/lib/containers/sigstore
BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
# Normally empty, DESTDIR can be used to relocate the entire install-tree
DESTDIR ?=
CONTAINERSCONFDIR ?= ${DESTDIR}/etc/containers
REGISTRIESDDIR ?= ${CONTAINERSCONFDIR}/registries.d
SIGSTOREDIR ?= ${DESTDIR}/var/lib/containers/sigstore
PREFIX ?= ${DESTDIR}/usr/local
BINDIR ?= ${PREFIX}/bin
MANDIR ?= ${PREFIX}/share/man
BASHCOMPLETIONSDIR ?= ${PREFIX}/share/bash-completion/completions

GO ?= go
GOBIN := $(shell $(GO) env GOBIN)
@@ -32,6 +29,10 @@ ifeq ($(GOBIN),)
GOBIN := $(GOPATH)/bin
endif

# Required for integration-tests to detect they are running inside a specific
# container image. Env. var defined in image, make does not automatically
# pass to children unless explicitly exported
export container_magic
CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
GOMD2MAN ?= $(shell command -v go-md2man || echo '$(GOBIN)/go-md2man')

@@ -68,14 +69,14 @@ CONTAINER_RUN := $(CONTAINER_CMD) "$(IMAGE)"
GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)

EXTRA_LDFLAGS ?=
LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'
SKOPEO_LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'

MANPAGES_MD = $(wildcard docs/*.md)
MANPAGES ?= $(MANPAGES_MD:%.md=%)

BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh) $(shell hack/btrfs_installed_tag.sh)
LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(DARWIN_BUILD_TAG)
LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG)
BUILDTAGS += $(LOCAL_BUILD_TAGS)

ifeq ($(DISABLE_CGO), 1)
@@ -86,11 +87,13 @@ endif
# Note: Uses the -N -l go compiler options to disable compiler optimizations
#       and inlining. Using these build options allows you to subsequently
#       use source debugging tools like delve.
all: binary docs-in-container
all: bin/skopeo docs

help:
	@echo "Usage: make <target>"
	@echo
	@echo "Defaults to building bin/skopeo and docs"
	@echo
	@echo " * 'install' - Install binaries and documents to system locations"
	@echo " * 'binary' - Build skopeo with a container"
	@echo " * 'static' - Build statically linked binary"
@@ -112,8 +115,11 @@ binary: cmd/skopeo
# Update nix/nixpkgs.json its latest stable commit
.PHONY: nixpkgs
nixpkgs:
	@nix run -f channel:nixos-20.03 nix-prefetch-git -c nix-prefetch-git \
		--no-deepClone https://github.com/nixos/nixpkgs > nix/nixpkgs.json
	@nix run \
		-f channel:nixos-20.09 nix-prefetch-git \
		-c nix-prefetch-git \
		--no-deepClone \
		https://github.com/nixos/nixpkgs refs/heads/nixos-20.09 > nix/nixpkgs.json

# Build statically linked binary
.PHONY: static
@@ -125,13 +131,16 @@ static:
# Build w/o using containers
.PHONY: bin/skopeo
bin/skopeo:
	$(GPGME_ENV) $(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} ${LDFLAGS} -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o $@ ./cmd/skopeo
	$(GPGME_ENV) $(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} ${SKOPEO_LDFLAGS} -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o $@ ./cmd/skopeo
bin/skopeo.%:
	GOOS=$(word 2,$(subst ., ,$@)) GOARCH=$(word 3,$(subst ., ,$@)) $(GO) build $(MOD_VENDOR) ${SKOPEO_LDFLAGS} -tags "containers_image_openpgp $(BUILDTAGS)" -o $@ ./cmd/skopeo
local-cross: bin/skopeo.darwin.amd64 bin/skopeo.linux.arm bin/skopeo.linux.arm64 bin/skopeo.windows.386.exe bin/skopeo.windows.amd64.exe

build-container:
	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -t "$(IMAGE)" .

$(MANPAGES): %: %.md
	@sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $@
	sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $@

docs: $(MANPAGES)

@@ -145,22 +154,22 @@ clean:

install: install-binary install-docs install-completions
	install -d -m 755 ${SIGSTOREDIR}
	install -d -m 755 ${CONTAINERSSYSCONFIGDIR}
	install -m 644 default-policy.json ${CONTAINERSSYSCONFIGDIR}/policy.json
	install -d -m 755 ${CONTAINERSCONFDIR}
	install -m 644 default-policy.json ${CONTAINERSCONFDIR}/policy.json
	install -d -m 755 ${REGISTRIESDDIR}
	install -m 644 default.yaml ${REGISTRIESDDIR}/default.yaml

install-binary: bin/skopeo
	install -d -m 755 ${INSTALLDIR}
	install -m 755 bin/skopeo ${INSTALLDIR}/skopeo
	install -d -m 755 ${BINDIR}
	install -m 755 bin/skopeo ${BINDIR}/skopeo

install-docs: docs
	install -d -m 755 ${MANINSTALLDIR}/man1
	install -m 644 docs/*.1 ${MANINSTALLDIR}/man1/
	install -d -m 755 ${MANDIR}/man1
	install -m 644 docs/*.1 ${MANDIR}/man1

install-completions:
	install -m 755 -d ${BASHINSTALLDIR}
	install -m 644 completions/bash/skopeo ${BASHINSTALLDIR}/skopeo
	install -m 755 -d ${BASHCOMPLETIONSDIR}
	install -m 644 completions/bash/skopeo ${BASHCOMPLETIONSDIR}/skopeo

shell: build-container
	$(CONTAINER_RUN) bash

@@ -169,7 +178,11 @@ check: validate test-unit test-integration test-system

# The tests can run out of entropy and block in containers, so replace /dev/random.
test-integration: build-container
	$(CONTAINER_RUN) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'
	$(CONTAINER_RUN) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" $(MAKE) test-integration-local'

# Intended for CI, shortcut 'build-container' since already running inside container.
test-integration-local:
	hack/make.sh test-integration

# complicated set of options needed to run podman-in-podman
test-system: build-container
@@ -177,11 +190,15 @@ test-system: build-container
	$(CONTAINER_CMD) --privileged \
		-v $$DTEMP:/var/lib/containers:Z -v /run/systemd/journal/socket:/run/systemd/journal/socket \
		"$(IMAGE)" \
		bash -c 'BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-system'; \
		bash -c 'BUILDTAGS="$(BUILDTAGS)" $(MAKE) test-system-local'; \
	rc=$$?; \
	$(RM) -rf $$DTEMP; \
	exit $$rc

# Intended for CI, shortcut 'build-container' since already running inside container.
test-system-local:
	hack/make.sh test-system

test-unit: build-container
	# Just call (make test unit-local) here instead of worrying about environment differences
	$(CONTAINER_RUN) make test-unit-local BUILDTAGS='$(BUILDTAGS)'
28  README.md

@@ -19,6 +19,7 @@ Skopeo works with API V2 container image registries such as [docker.io](https://
For example you can copy images from one registry to another, without requiring privilege.
* Inspecting a remote image showing its properties including its layers, without requiring you to pull the image to the host.
* Deleting an image from an image repository.
* Syncing an external image repository to an internal registry for air-gapped deployments.
* When required by the repository, skopeo can pass the appropriate credentials and certificates for authentication.

Skopeo operates on the following image and repository types:
@@ -121,7 +122,7 @@ $ skopeo inspect --config docker://registry.fedoraproject.org/fedora:latest | j
  ]
}
```
#### Show unverifed image's digest
#### Show unverified image's digest
```console
$ skopeo inspect docker://registry.fedoraproject.org/fedora:latest | jq '.Digest'
"sha256:655721ff613ee766a4126cb5e0d5ae81598e1b0c3bcf7017c36c4d72cb092fe9"
@@ -136,7 +137,7 @@ $ skopeo inspect docker://registry.fedoraproject.org/fedora:latest | jq '.Digest

* Container Storage backends

  - github.com/containers/storage (Backend for [Podman](https://podman.io), [CRI-O](https://cri-o.io), [Buildah](https://buildah.io) and friends)
  - [github.com/containers/storage](https://github.com/containers/storage) (Backend for [Podman](https://podman.io), [CRI-O](https://cri-o.io), [Buildah](https://buildah.io) and friends)

  - Docker daemon storage

@@ -154,17 +155,22 @@ $ skopeo copy oci:busybox_ocilayout:latest dir:existingemptydirectory
$ skopeo delete docker://localhost:5000/imagename:latest
```

## Syncing registries
```console
$ skopeo sync --src docker --dest dir registry.example.com/busybox /media/usb
```

## Authenticating to a registry

#### Private registries with authentication
skopeo uses credentials from the --creds (for skopeo inspect|delete) or --src-creds|--dest-creds (for skopeo copy) flags, if set; otherwise it uses configuration set by skopeo login, podman login, buildah login, or docker login.

```console
$ skopeo login --user USER docker://myregistrydomain.com:5000
$ skopeo login --username USER myregistrydomain.com:5000
Password:
$ skopeo inspect docker://myregistrydomain.com:5000/busybox
{"Tag":"latest","Digest":"sha256:473bb2189d7b913ed7187a33d11e743fdc2f88931122a44d91a301b64419f092","RepoTags":["latest"],"Comment":"","Created":"2016-01-15T18:06:41.282540103Z","ContainerConfig":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"DockerVersion":"1.8.3","Author":"","Config":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"Architecture":"amd64","Os":"linux"}
$ skopeo logout docker://myregistrydomain.com:5000
$ skopeo logout myregistrydomain.com:5000
```

#### Using --creds directly
@@ -189,6 +195,20 @@ Contributing

Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate in the project.

## Commands
| Command | Description |
| -------------------------------------------------- | ---------------------------------------------------------------------------------------------|
| [skopeo-copy(1)](/docs/skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](/docs/skopeo-delete.1.md) | Mark the image-name for later deletion by the registry's garbage collector. |
| [skopeo-inspect(1)](/docs/skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
| [skopeo-list-tags(1)](/docs/skopeo-list-tags.1.md) | Return a list of tags for the transport-specific image repository. |
| [skopeo-login(1)](/docs/skopeo-login.1.md) | Login to a container registry. |
| [skopeo-logout(1)](/docs/skopeo-logout.1.md) | Logout of a container registry. |
| [skopeo-manifest-digest(1)](/docs/skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](/docs/skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](/docs/skopeo-standalone-verify.1.md)| Verify an image signature. |
| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between container registries and local directories. |

License
-
skopeo is licensed under the Apache License, Version 2.0. See
@@ -4,6 +4,7 @@ import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/containers/common/pkg/retry"
@@ -12,11 +13,9 @@ import (
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/spf13/cobra"

	encconfig "github.com/containers/ocicrypt/config"
	enchelpers "github.com/containers/ocicrypt/helpers"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/spf13/cobra"
)

type copyOptions struct {
@@ -27,6 +26,7 @@ type copyOptions struct {
	additionalTags    []string       // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
	removeSignatures  bool           // Do not copy signatures from the source image
	signByFingerprint string         // Sign the image using a GPG key with the specified fingerprint
	digestFile        string         // Write digest to this file
	format            optionalString // Force conversion of the image to a specified format
	quiet             bool           // Suppress output information when copying images
	all               bool           // Copy all of the images if the source is a list
@@ -69,6 +69,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
	flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
	flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE-IMAGE")
	flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
	flags.StringVar(&opts.digestFile, "digestfile", "", "Write the digest of the pushed image to the specified file")
	flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)`)
	flags.StringSliceVar(&opts.encryptionKeys, "encryption-key", []string{}, "*Experimental* key with the encryption protocol to use needed to encrypt the image (e.g. jwe:/path/to/key.pem)")
	flags.IntSliceVar(&opts.encryptLayer, "encrypt-layer", []int{}, "*Experimental* the 0-indexed layer indices, with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer)")
@@ -112,15 +113,9 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {

	var manifestType string
	if opts.format.present {
		switch opts.format.value {
		case "oci":
			manifestType = imgspecv1.MediaTypeImageManifest
		case "v2s1":
			manifestType = manifest.DockerV2Schema1SignedMediaType
		case "v2s2":
			manifestType = manifest.DockerV2Schema2MediaType
		default:
			return fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", opts.format.value)
		manifestType, err = parseManifestFormat(opts.format.value)
		if err != nil {
			return err
		}
	}

@@ -184,7 +179,7 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
	}

	return retry.RetryIfNecessary(ctx, func() error {
		_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
		manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
			RemoveSignatures: opts.removeSignatures,
			SignBy:           opts.signByFingerprint,
			ReportWriter:     stdout,
@@ -196,6 +191,18 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
			OciEncryptLayers: encLayers,
			OciEncryptConfig: encConfig,
		})
		return err
		if err != nil {
			return err
		}
		if opts.digestFile != "" {
			manifestDigest, err := manifest.Digest(manifestBytes)
			if err != nil {
				return err
			}
			if err = ioutil.WriteFile(opts.digestFile, []byte(manifestDigest.String()), 0644); err != nil {
				return fmt.Errorf("Failed to write digest to file %q: %w", opts.digestFile, err)
			}
		}
		return nil
	}, opts.retryOpts)
}
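The new digest-file handling above comes down to hashing the manifest returned by copy.Image and writing it to a file. A minimal standalone sketch of that flow, assuming a hypothetical manifestBytes slice and output path rather than skopeo's actual command wiring:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/image/v5/manifest"
)

// writeManifestDigest mirrors the new --digestfile behaviour: it computes the
// canonical digest of a raw manifest and stores it in the given file.
// In the real code, manifestBytes is the value returned by copy.Image.
func writeManifestDigest(manifestBytes []byte, digestFile string) error {
	manifestDigest, err := manifest.Digest(manifestBytes)
	if err != nil {
		return err
	}
	if err := ioutil.WriteFile(digestFile, []byte(manifestDigest.String()), 0644); err != nil {
		return fmt.Errorf("failed to write digest to file %q: %w", digestFile, err)
	}
	return nil
}

func main() {
	// Example input: any raw manifest bytes would do here.
	example := []byte(`{"schemaVersion": 2}`)
	if err := writeManifestDigest(example, "/tmp/digest.txt"); err != nil {
		fmt.Println("error:", err)
	}
}
```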
@@ -4,8 +4,12 @@ import (
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strings"
	"text/tabwriter"
	"text/template"

	"github.com/containers/common/pkg/report"
	"github.com/containers/common/pkg/retry"
	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/image"
@@ -23,6 +27,7 @@ type inspectOptions struct {
	global    *globalOptions
	image     *imageOptions
	retryOpts *retry.RetryOptions
	format    string
	raw       bool // Output the raw manifest instead of parsing information about the image
	config    bool // Output the raw config blob instead of parsing information about the image
}
@@ -45,13 +50,16 @@ Supported transports:

See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
		RunE:    commandAction(opts.run),
		Example: `skopeo inspect docker://docker.io/fedora`,
		RunE: commandAction(opts.run),
		Example: `skopeo inspect docker://registry.fedoraproject.org/fedora
  skopeo inspect --config docker://docker.io/alpine
  skopeo inspect --format "Name: {{.Name}} Digest: {{.Digest}}" docker://registry.access.redhat.com/ubi8`,
	}
	adjustUsage(cmd)
	flags := cmd.Flags()
	flags.BoolVar(&opts.raw, "raw", false, "output raw manifest or configuration")
	flags.BoolVar(&opts.config, "config", false, "output configuration")
	flags.StringVarP(&opts.format, "format", "f", "", "Format the output to a Go template")
	flags.AddFlagSet(&sharedFlags)
	flags.AddFlagSet(&imageFlags)
	flags.AddFlagSet(&retryFlags)
@@ -63,6 +71,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
		rawManifest []byte
		src         types.ImageSource
		imgInspect  *types.ImageInspectInfo
		data        []interface{}
	)
	ctx, cancel := opts.global.commandTimeoutContext()
	defer cancel()
@@ -70,6 +79,9 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
	if len(args) != 1 {
		return errors.New("Exactly one argument expected")
	}
	if opts.raw && opts.format != "" {
		return errors.New("raw output does not support format option")
	}
	imageName := args[0]

	if err := reexecIfNecessaryForImages(imageName); err != nil {
@@ -106,12 +118,13 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
		if err != nil {
			return fmt.Errorf("Error writing manifest to standard output: %v", err)
		}

		return nil
	}

	img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, nil))
	if err != nil {
		return fmt.Errorf("Error parsing manifest for image: %v", err)
		return errors.Wrapf(err, "Error parsing manifest for image")
	}

	if opts.config && opts.raw {
@@ -124,7 +137,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
		}
		_, err = stdout.Write(configBlob)
		if err != nil {
			return fmt.Errorf("Error writing configuration blob to standard output: %v", err)
			return errors.Wrapf(err, "Error writing configuration blob to standard output")
		}
		return nil
	} else if opts.config {
@@ -135,9 +148,19 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
		}, opts.retryOpts); err != nil {
			return errors.Wrapf(err, "Error reading OCI-formatted configuration data")
		}
		err = json.NewEncoder(stdout).Encode(config)
		if report.IsJSON(opts.format) || opts.format == "" {
			var out []byte
			out, err = json.MarshalIndent(config, "", " ")
			if err == nil {
				fmt.Fprintf(stdout, "%s\n", string(out))
			}
		} else {
			row := "{{range . }}" + report.NormalizeFormat(opts.format) + "{{end}}"
			data = append(data, config)
			err = printTmpl(row, data)
		}
		if err != nil {
			return fmt.Errorf("Error writing OCI-formatted configuration data to standard output: %v", err)
			return errors.Wrapf(err, "Error writing OCI-formatted configuration data to standard output")
		}
		return nil
	}
@@ -164,7 +187,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
	}
	outputData.Digest, err = manifest.Digest(rawManifest)
	if err != nil {
		return fmt.Errorf("Error computing manifest digest: %v", err)
		return errors.Wrapf(err, "Error computing manifest digest")
	}
	if dockerRef := img.Reference().DockerReference(); dockerRef != nil {
		outputData.Name = dockerRef.Name()
@@ -182,15 +205,35 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
	// In addition, AWS ECR rejects it with 403 (Forbidden) if the "ecr:ListImages"
	// action is not allowed.
	if !strings.Contains(err.Error(), "401") && !strings.Contains(err.Error(), "403") {
		return fmt.Errorf("Error determining repository tags: %v", err)
		return errors.Wrapf(err, "Error determining repository tags")
	}
	logrus.Warnf("Registry disallows tag list retrieval; skipping")
	}
	}
	out, err := json.MarshalIndent(outputData, "", " ")
	if report.IsJSON(opts.format) || opts.format == "" {
		out, err := json.MarshalIndent(outputData, "", " ")
		if err == nil {
			fmt.Fprintf(stdout, "%s\n", string(out))
		}
		return err
	}
	row := "{{range . }}" + report.NormalizeFormat(opts.format) + "{{end}}"
	data = append(data, outputData)
	return printTmpl(row, data)
}

func inspectNormalize(row string) string {
	r := strings.NewReplacer(
		".ImageID", ".Image",
	)
	return r.Replace(row)
}

func printTmpl(row string, data []interface{}) error {
	t, err := template.New("skopeo inspect").Parse(row)
	if err != nil {
		return err
	}
	fmt.Fprintf(stdout, "%s\n", string(out))
	return nil
	w := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', 0)
	return t.Execute(w, data)
}
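The new printTmpl helper above is essentially "wrap the user's format string in a range, parse it as a Go template, and execute it into a tabwriter". A self-contained sketch of that pattern using only the standard library; the report.IsJSON/report.NormalizeFormat helpers from containers/common are left out, and the struct and sample values are made up for illustration:

```go
package main

import (
	"os"
	"text/tabwriter"
	"text/template"
)

// inspectOutput is a stand-in for the data skopeo inspect would render.
type inspectOutput struct {
	Name   string
	Digest string
}

// printTmplSketch renders data through a user-supplied Go template row,
// tab-aligned, the same way the --format code path above does.
func printTmplSketch(row string, data []interface{}) error {
	t, err := template.New("skopeo inspect").Parse(row)
	if err != nil {
		return err
	}
	w := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', 0)
	defer w.Flush() // tabwriter buffers output until flushed
	return t.Execute(w, data)
}

func main() {
	// The real code builds the row as "{{range . }}" + NormalizeFormat(format) + "{{end}}".
	row := "{{range .}}Name: {{.Name}}\tDigest: {{.Digest}}\n{{end}}"
	data := []interface{}{
		inspectOutput{Name: "registry.example.com/busybox", Digest: "sha256:abc123"},
	}
	if err := printTmplSketch(row, data); err != nil {
		panic(err)
	}
}
```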
@@ -17,6 +17,8 @@ import (
// and will be populated by the Makefile
var gitCommit = ""

var defaultUserAgent = "skopeo/" + version.Version

type globalOptions struct {
	debug     bool         // Enable debug output
	tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
@@ -143,6 +145,7 @@ func (opts *globalOptions) newSystemContext() *types.SystemContext {
		VariantChoice:            opts.overrideVariant,
		SystemRegistriesConfPath: opts.registriesConfPath,
		BigFilesTemporaryDir:     opts.tmpDir,
		DockerRegistryUserAgent:  defaultUserAgent,
	}
	// DEPRECATED: We support this for backward compatibility, but override it if a per-image flag is provided.
	if opts.tlsVerify.present {

@@ -23,7 +23,10 @@ func TestGlobalOptionsNewSystemContext(t *testing.T) {
	// Default state
	opts, _ := fakeGlobalOptions(t, []string{})
	res := opts.newSystemContext()
	assert.Equal(t, &types.SystemContext{}, res)
	assert.Equal(t, &types.SystemContext{
		// User-Agent is set by default.
		DockerRegistryUserAgent: defaultUserAgent,
	}, res)
	// Set everything to non-default values.
	opts, _ = fakeGlobalOptions(t, []string{
		"--registries.d", "/srv/registries.d",
@@ -43,5 +46,6 @@ func TestGlobalOptionsNewSystemContext(t *testing.T) {
		BigFilesTemporaryDir:        "/srv",
		SystemRegistriesConfPath:    "/srv/registries.conf",
		DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
		DockerRegistryUserAgent:     defaultUserAgent,
	}, res)
}
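The change above makes every SystemContext carry a default User-Agent so registries can identify the client. A minimal sketch of constructing such a context with containers/image; the version string here is a made-up placeholder, not skopeo's real version package:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

// exampleVersion stands in for skopeo's version.Version constant.
const exampleVersion = "1.2.x-example"

var defaultUserAgent = "skopeo/" + exampleVersion

// newSystemContextSketch shows the shape of the change: the User-Agent field
// is always populated, and per-flag options would be layered on top of it.
func newSystemContextSketch() *types.SystemContext {
	return &types.SystemContext{
		DockerRegistryUserAgent: defaultUserAgent,
	}
}

func main() {
	ctx := newSystemContextSketch()
	fmt.Println("User-Agent sent to registries:", ctx.DockerRegistryUserAgent)
}
```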
@@ -17,8 +17,8 @@ func TestManifestDigest(t *testing.T) {
	}

	// Error reading manifest
	out, err := runSkopeo("manifest-digest", "/this/doesnt/exist")
	assertTestFailed(t, out, err, "/this/doesnt/exist")
	out, err := runSkopeo("manifest-digest", "/this/does/not/exist")
	assertTestFailed(t, out, err, "/this/does/not/exist")

	// Error computing manifest
	out, err = runSkopeo("manifest-digest", "fixtures/v2s1-invalid-signatures.manifest.json")

@@ -58,8 +58,8 @@ func TestStandaloneSign(t *testing.T) {

	// Error reading manifest
	out, err := runSkopeo("standalone-sign", "-o", "/dev/null",
		"/this/doesnt/exist", dockerReference, fixturesTestKeyFingerprint)
	assertTestFailed(t, out, err, "/this/doesnt/exist")
		"/this/does/not/exist", dockerReference, fixturesTestKeyFingerprint)
	assertTestFailed(t, out, err, "/this/does/not/exist")

	// Invalid Docker reference
	out, err = runSkopeo("standalone-sign", "-o", "/dev/null",
@@ -117,14 +117,14 @@ func TestStandaloneVerify(t *testing.T) {
	}

	// Error reading manifest
	out, err := runSkopeo("standalone-verify", "/this/doesnt/exist",
	out, err := runSkopeo("standalone-verify", "/this/does/not/exist",
		dockerReference, fixturesTestKeyFingerprint, signaturePath)
	assertTestFailed(t, out, err, "/this/doesnt/exist")
	assertTestFailed(t, out, err, "/this/does/not/exist")

	// Error reading signature
	out, err = runSkopeo("standalone-verify", manifestPath,
		dockerReference, fixturesTestKeyFingerprint, "/this/doesnt/exist")
	assertTestFailed(t, out, err, "/this/doesnt/exist")
		dockerReference, fixturesTestKeyFingerprint, "/this/does/not/exist")
	assertTestFailed(t, out, err, "/this/does/not/exist")

	// Error verifying signature
	out, err = runSkopeo("standalone-verify", manifestPath,
@@ -151,8 +151,8 @@ func TestUntrustedSignatureDump(t *testing.T) {

	// Error reading manifest
	out, err := runSkopeo("untrusted-signature-dump-without-verification",
		"/this/doesnt/exist")
	assertTestFailed(t, out, err, "/this/doesnt/exist")
		"/this/does/not/exist")
	assertTestFailed(t, out, err, "/this/does/not/exist")

	// Error reading signature (input is not a signature)
	out, err = runSkopeo("untrusted-signature-dump-without-verification", "fixtures/image.manifest.json")
@@ -18,6 +18,7 @@ import (
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
@@ -30,21 +31,23 @@ type syncOptions struct {
	srcImage  *imageOptions     // Source image options
	destImage *imageDestOptions // Destination image options
	retryOpts *retry.RetryOptions
	removeSignatures  bool   // Do not copy signatures from the source image
	signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
	source      string // Source repository name
	destination string // Destination registry name
	scoped      bool   // When true, namespace copied images at destination using the source repository name
	removeSignatures  bool           // Do not copy signatures from the source image
	signByFingerprint string         // Sign the image using a GPG key with the specified fingerprint
	format            optionalString // Force conversion of the image to a specified format
	source      string // Source repository name
	destination string // Destination registry name
	scoped      bool   // When true, namespace copied images at destination using the source repository name
	all         bool   // Copy all of the images if an image in the source is a list
}

// repoDescriptor contains information of a single repository used as a sync source.
type repoDescriptor struct {
	DirBasePath  string                 // base path when source is 'dir'
	TaggedImages []types.ImageReference // List of tagged image found for the repository
	Context      *types.SystemContext   // SystemContext for the sync command
	DirBasePath string                 // base path when source is 'dir'
	ImageRefs   []types.ImageReference // List of tagged image found for the repository
	Context     *types.SystemContext   // SystemContext for the sync command
}

// tlsVerify is an implementation of the Unmarshaler interface, used to
// tlsVerifyConfig is an implementation of the Unmarshaler interface, used to
// customize the unmarshaling behaviour of the tls-verify YAML key.
type tlsVerifyConfig struct {
	skip types.OptionalBool // skip TLS verification check (false by default)
@@ -53,7 +56,7 @@ type tlsVerifyConfig struct {
// registrySyncConfig contains information about a single registry, read from
// the source YAML file
type registrySyncConfig struct {
	Images map[string][]string // Images map images name to slices with the images' tags
	Images map[string][]string // Images map images name to slices with the images' references (tags, digests)
	ImagesByTagRegex map[string]string `yaml:"images-by-tag-regex"` // Images map images name to regular expression with the images' tags
	Credentials types.DockerAuthConfig // Username and password used to authenticate with the registry
	TLSVerify tlsVerifyConfig `yaml:"tls-verify"` // TLS verification mode (enabled by default)
@@ -93,9 +96,11 @@ See skopeo-sync(1) for details.
	flags := cmd.Flags()
	flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE images")
	flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
	flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source)`)
	flags.StringVarP(&opts.source, "src", "s", "", "SOURCE transport type")
	flags.StringVarP(&opts.destination, "dest", "d", "", "DESTINATION transport type")
	flags.BoolVar(&opts.scoped, "scoped", false, "Images at DESTINATION are prefix using the full source image path as scope")
	flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
	flags.AddFlagSet(&sharedFlags)
	flags.AddFlagSet(&srcFlags)
	flags.AddFlagSet(&destFlags)
@@ -103,7 +108,7 @@ See skopeo-sync(1) for details.
	return cmd
}

// unmarshalYAML is the implementation of the Unmarshaler interface method
// UnmarshalYAML is the implementation of the Unmarshaler interface method
// method for the tlsVerifyConfig type.
// It unmarshals the 'tls-verify' YAML key so that, when they key is not
// specified, tls verification is enforced.
@@ -235,7 +240,7 @@ func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]
	return sourceReferences, nil
}

// imagesTopCopyFromDir builds a list of image references from the images found
// imagesToCopyFromDir builds a list of image references from the images found
// in the source directory.
// It returns an image reference slice with as many elements as the images found
// and any error encountered.
@@ -265,11 +270,11 @@ func imagesToCopyFromDir(dirPath string) ([]types.ImageReference, error) {
	return sourceReferences, nil
}

// imagesTopCopyFromDir builds a list of repository descriptors from the images
// imagesToCopyFromRegistry builds a list of repository descriptors from the images
// in a registry configuration.
// It returns a repository descriptors slice with as many elements as the images
// found and any error encountered. Each element of the slice is a list of
// tagged image references, to be used as sync source.
// image references, to be used as sync source.
func imagesToCopyFromRegistry(registryName string, cfg registrySyncConfig, sourceCtx types.SystemContext) ([]repoDescriptor, error) {
	serverCtx := &sourceCtx
	// override ctx with per-registryName options
@@ -277,10 +282,11 @@ func imagesToCopyFromRegistry(registryName string, cfg registrySyncConfig, sourc
	serverCtx.DockerDaemonCertPath = cfg.CertDir
	serverCtx.DockerDaemonInsecureSkipTLSVerify = (cfg.TLSVerify.skip == types.OptionalBoolTrue)
	serverCtx.DockerInsecureSkipTLSVerify = cfg.TLSVerify.skip
	serverCtx.DockerAuthConfig = &cfg.Credentials

	if cfg.Credentials != (types.DockerAuthConfig{}) {
		serverCtx.DockerAuthConfig = &cfg.Credentials
	}
	var repoDescList []repoDescriptor
	for imageName, tags := range cfg.Images {
	for imageName, refs := range cfg.Images {
		repoLogger := logrus.WithFields(logrus.Fields{
			"repo":     imageName,
			"registry": registryName,
@@ -295,24 +301,37 @@ func imagesToCopyFromRegistry(registryName string, cfg registrySyncConfig, sourc
		repoLogger.Info("Processing repo")

		var sourceReferences []types.ImageReference
		if len(tags) != 0 {
			for _, tag := range tags {
				tagLogger := logrus.WithFields(logrus.Fields{"tag": tag})
				taggedRef, err := reference.WithTag(repoRef, tag)
				if err != nil {
					tagLogger.Error("Error parsing tag, skipping")
					logrus.Error(err)
					continue
		if len(refs) != 0 {
			for _, ref := range refs {
				tagLogger := logrus.WithFields(logrus.Fields{"ref": ref})
				var named reference.Named
				// first try as digest
				if d, err := digest.Parse(ref); err == nil {
					named, err = reference.WithDigest(repoRef, d)
					if err != nil {
						tagLogger.Error("Error processing ref, skipping")
						logrus.Error(err)
						continue
					}
				} else {
					tagLogger.Debugf("Ref was not a digest, trying as a tag: %s", err)
					named, err = reference.WithTag(repoRef, ref)
					if err != nil {
						tagLogger.Error("Error parsing ref, skipping")
						logrus.Error(err)
						continue
					}
				}
				imageRef, err := docker.NewReference(taggedRef)

				imageRef, err := docker.NewReference(named)
				if err != nil {
					tagLogger.Error("Error processing tag, skipping")
					tagLogger.Error("Error processing ref, skipping")
					logrus.Errorf("Error getting image reference: %s", err)
					continue
				}
				sourceReferences = append(sourceReferences, imageRef)
			}
		} else { // len(tags) == 0
		} else { // len(refs) == 0
			repoLogger.Info("Querying registry for image tags")
			sourceReferences, err = imagesToCopyFromRepo(serverCtx, repoRef)
			if err != nil {
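The hunk above lets a sync entry be either a tag or a digest: the string is parsed as a digest first, and only on failure is it treated as a tag. A stripped-down sketch of that decision, assuming a hypothetical repository name and ref strings, with error handling reduced to returning the error instead of logging and skipping:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

// refToImageReference resolves "ref" against a repository, trying a digest
// first and falling back to a tag, mirroring the logic added above.
func refToImageReference(repoRef reference.Named, ref string) (types.ImageReference, error) {
	var named reference.Named
	if d, err := digest.Parse(ref); err == nil {
		// Valid digest: pin the repository to it.
		named, err = reference.WithDigest(repoRef, d)
		if err != nil {
			return nil, err
		}
	} else {
		// Not a digest: treat the string as a tag.
		named, err = reference.WithTag(repoRef, ref)
		if err != nil {
			return nil, err
		}
	}
	return docker.NewReference(named)
}

func main() {
	repoRef, err := reference.ParseNormalizedNamed("registry.example.com/busybox")
	if err != nil {
		panic(err)
	}
	refs := []string{
		"latest",
		"sha256:655721ff613ee766a4126cb5e0d5ae81598e1b0c3bcf7017c36c4d72cb092fe9",
	}
	for _, ref := range refs {
		imgRef, err := refToImageReference(repoRef, ref)
		if err != nil {
			fmt.Println("skipping", ref, "error:", err)
			continue
		}
		fmt.Println("resolved:", imgRef.DockerReference().String())
	}
}
```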
@@ -323,12 +342,12 @@ func imagesToCopyFromRegistry(registryName string, cfg registrySyncConfig, sourc
		}

		if len(sourceReferences) == 0 {
			repoLogger.Warnf("No tags to sync found")
			repoLogger.Warnf("No refs to sync found")
			continue
		}
		repoDescList = append(repoDescList, repoDescriptor{
			TaggedImages: sourceReferences,
			Context:      serverCtx})
			ImageRefs: sourceReferences,
			Context:   serverCtx})
	}

	for imageName, tagRegex := range cfg.ImagesByTagRegex {
@@ -377,12 +396,12 @@ func imagesToCopyFromRegistry(registryName string, cfg registrySyncConfig, sourc
		}

		if len(sourceReferences) == 0 {
			repoLogger.Warnf("No tags to sync found")
			repoLogger.Warnf("No refs to sync found")
			continue
		}
		repoDescList = append(repoDescList, repoDescriptor{
			TaggedImages: sourceReferences,
			Context:      serverCtx})
			ImageRefs: sourceReferences,
			Context:   serverCtx})
	}

	return repoDescList, nil
@@ -415,13 +434,13 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
		if err != nil {
			return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", docker.Transport.Name(), named.String())
		}
		desc.TaggedImages = []types.ImageReference{srcRef}
		desc.ImageRefs = []types.ImageReference{srcRef}
	} else {
		desc.TaggedImages, err = imagesToCopyFromRepo(sourceCtx, named)
		desc.ImageRefs, err = imagesToCopyFromRepo(sourceCtx, named)
		if err != nil {
			return descriptors, err
		}
		if len(desc.TaggedImages) == 0 {
		if len(desc.ImageRefs) == 0 {
			return descriptors, errors.Errorf("No images to sync found in %q", source)
		}
	}
@@ -437,11 +456,11 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
	}
	desc.DirBasePath = source
	var err error
	desc.TaggedImages, err = imagesToCopyFromDir(source)
	desc.ImageRefs, err = imagesToCopyFromDir(source)
	if err != nil {
		return descriptors, err
	}
	if len(desc.TaggedImages) == 0 {
	if len(desc.ImageRefs) == 0 {
		return descriptors, errors.Errorf("No images to sync found in %q", source)
	}
	descriptors = append(descriptors, desc)
@@ -509,11 +528,24 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
		return errors.New("sync from 'dir' to 'dir' not implemented, consider using rsync instead")
	}

	imageListSelection := copy.CopySystemImage
	if opts.all {
		imageListSelection = copy.CopyAllImages
	}

	sourceCtx, err := opts.srcImage.newSystemContext()
	if err != nil {
		return err
	}

	var manifestType string
	if opts.format.present {
		manifestType, err = parseManifestFormat(opts.format.value)
		if err != nil {
			return err
		}
	}

	ctx, cancel := opts.global.commandTimeoutContext()
	defer cancel()

@@ -534,15 +566,18 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {

	imagesNumber := 0
	options := copy.Options{
		RemoveSignatures: opts.removeSignatures,
		SignBy:           opts.signByFingerprint,
		ReportWriter:     os.Stdout,
		DestinationCtx:   destinationCtx,
		RemoveSignatures:                      opts.removeSignatures,
		SignBy:                                opts.signByFingerprint,
		ReportWriter:                          os.Stdout,
		DestinationCtx:                        destinationCtx,
		ImageListSelection:                    imageListSelection,
		OptimizeDestinationImageAlreadyExists: true,
		ForceManifestMIMEType:                 manifestType,
	}

	for _, srcRepo := range srcRepoList {
		options.SourceCtx = srcRepo.Context
		for counter, ref := range srcRepo.TaggedImages {
		for counter, ref := range srcRepo.ImageRefs {
			var destSuffix string
|
||||
switch ref.Transport() {
|
||||
case docker.Transport:
|
||||
@@ -569,13 +604,13 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"from": transports.ImageName(ref),
|
||||
"to": transports.ImageName(destRef),
|
||||
}).Infof("Copying image tag %d/%d", counter+1, len(srcRepo.TaggedImages))
|
||||
}).Infof("Copying image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
|
||||
|
||||
if err = retry.RetryIfNecessary(ctx, func() error {
|
||||
_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
|
||||
return err
|
||||
}, opts.retryOpts); err != nil {
|
||||
return errors.Wrapf(err, "Error copying tag %q", transports.ImageName(ref))
|
||||
return errors.Wrapf(err, "Error copying ref %q", transports.ImageName(ref))
|
||||
}
|
||||
imagesNumber++
|
||||
}
|
||||
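The hunks above change sync so that entries under `images` can be digests as well as tags. As a quick illustration of the ordering they introduce (try `digest.Parse` first, fall back to `reference.WithTag`), here is a minimal, self-contained sketch; the repository name is only an example and `refToNamed` is a hypothetical helper, not the actual sync.go code:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
	"github.com/opencontainers/go-digest"
)

// refToNamed mirrors the order used in imagesToCopyFromRegistry above:
// a ref is first tried as a digest, and only then treated as a tag.
func refToNamed(repoRef reference.Named, ref string) (reference.Named, error) {
	if d, err := digest.Parse(ref); err == nil {
		// "sha256:..." style refs become canonical (digested) references.
		return reference.WithDigest(repoRef, d)
	}
	// Anything that does not parse as a digest is treated as a tag.
	return reference.WithTag(repoRef, ref)
}

func main() {
	repoRef, err := reference.ParseNormalizedNamed("registry.example.com/redis")
	if err != nil {
		panic(err)
	}
	for _, r := range []string{"1.0", "sha256:0000000000000000000000000000000011111111111111111111111111111111"} {
		named, err := refToNamed(repoRef, r)
		fmt.Println(named, err)
	}
}
```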
|
||||
@@ -2,14 +2,17 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
"github.com/containers/image/v5/transports/alltransports"
|
||||
"github.com/containers/image/v5/types"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
@@ -57,7 +60,7 @@ type dockerImageOptions struct {
|
||||
shared *sharedImageOptions // May be shared across several imageOptions instances.
|
||||
authFilePath optionalString // Path to a */containers/auth.json (prefixed version to override shared image option).
|
||||
credsOption optionalString // username[:password] for accessing a registry
|
||||
registryToken optionalString // token to be used directy as a Bearer token when accessing the registry
|
||||
registryToken optionalString // token to be used directly as a Bearer token when accessing the registry
|
||||
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
|
||||
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
|
||||
noCreds bool // Access the registry anonymously
|
||||
@@ -246,6 +249,21 @@ func parseImageSource(ctx context.Context, opts *imageOptions, name string) (typ
|
||||
return ref.NewImageSource(ctx, sys)
|
||||
}
|
||||
|
||||
// parseManifestFormat parses the format parameter for the copy and sync commands.
// It returns the string value to use as the manifest MIME type.
|
||||
func parseManifestFormat(manifestFormat string) (string, error) {
|
||||
switch manifestFormat {
|
||||
case "oci":
|
||||
return imgspecv1.MediaTypeImageManifest, nil
|
||||
case "v2s1":
|
||||
return manifest.DockerV2Schema1SignedMediaType, nil
|
||||
case "v2s2":
|
||||
return manifest.DockerV2Schema2MediaType, nil
|
||||
default:
|
||||
return "", fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", manifestFormat)
|
||||
}
|
||||
}
|
||||
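For context on where the returned MIME type ends up: the sync hunk earlier assigns it to `copy.Options.ForceManifestMIMEType`. A minimal sketch of that hand-off (the helper name is illustrative, not skopeo's actual wiring):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/copy"
)

// optionsForFormat is a hypothetical helper: the string produced by
// parseManifestFormat becomes ForceManifestMIMEType, and an empty string
// leaves the source manifest type unchanged.
func optionsForFormat(manifestType string) copy.Options {
	return copy.Options{ForceManifestMIMEType: manifestType}
}

func main() {
	fmt.Printf("%+v\n", optionsForFormat("application/vnd.oci.image.manifest.v1+json"))
}
```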
|
||||
// usageTemplate returns the usage template for skopeo commands
|
||||
// This blocks the displaying of the global options. The main skopeo
|
||||
// command should not use this.
|
||||
|
||||
@@ -4,7 +4,9 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -37,7 +39,9 @@ func TestImageOptionsNewSystemContext(t *testing.T) {
|
||||
opts := fakeImageOptions(t, "dest-", []string{}, []string{})
|
||||
res, err := opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{}, res)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
|
||||
// Set everything to non-default values.
|
||||
opts = fakeImageOptions(t, "dest-", []string{
|
||||
@@ -72,6 +76,7 @@ func TestImageOptionsNewSystemContext(t *testing.T) {
|
||||
DockerDaemonCertPath: "/srv/cert-dir",
|
||||
DockerDaemonHost: "daemon-host.example.com",
|
||||
DockerDaemonInsecureSkipTLSVerify: true,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
BigFilesTemporaryDir: "/srv",
|
||||
}, res)
|
||||
|
||||
@@ -129,7 +134,9 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
|
||||
opts := fakeImageDestOptions(t, "dest-", []string{}, []string{})
|
||||
res, err := opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{}, res)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
|
||||
oldXRD, hasXRD := os.LookupEnv("REGISTRY_AUTH_FILE")
|
||||
defer func() {
|
||||
@@ -149,7 +156,10 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{AuthFilePath: authFile}, res)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
AuthFilePath: authFile,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
|
||||
// Set everything to non-default values.
|
||||
opts = fakeImageDestOptions(t, "dest-", []string{
|
||||
@@ -184,6 +194,7 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
|
||||
DockerDaemonCertPath: "/srv/cert-dir",
|
||||
DockerDaemonHost: "daemon-host.example.com",
|
||||
DockerDaemonInsecureSkipTLSVerify: true,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
DirForceCompress: true,
|
||||
BigFilesTemporaryDir: "/srv",
|
||||
}, res)
|
||||
@@ -194,6 +205,38 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestParseManifestFormat(t *testing.T) {
|
||||
for _, testCase := range []struct {
|
||||
formatParam string
|
||||
expectedManifestType string
|
||||
expectErr bool
|
||||
}{
|
||||
{"oci",
|
||||
imgspecv1.MediaTypeImageManifest,
|
||||
false},
|
||||
{"v2s1",
|
||||
manifest.DockerV2Schema1SignedMediaType,
|
||||
false},
|
||||
{"v2s2",
|
||||
manifest.DockerV2Schema2MediaType,
|
||||
false},
|
||||
{"",
|
||||
"",
|
||||
true},
|
||||
{"badValue",
|
||||
"",
|
||||
true},
|
||||
} {
|
||||
manifestType, err := parseManifestFormat(testCase.formatParam)
|
||||
if testCase.expectErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, manifestType, testCase.expectedManifestType)
|
||||
}
|
||||
}
|
||||
|
||||
// since there is a shared authfile image option and a non-shared (prefixed) one, make sure the override logic
|
||||
// works correctly.
|
||||
func TestImageOptionsAuthfileOverride(t *testing.T) {
|
||||
@@ -233,7 +276,8 @@ func TestImageOptionsAuthfileOverride(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
AuthFilePath: testCase.expectedAuthfilePath,
|
||||
AuthFilePath: testCase.expectedAuthfilePath,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
}, res)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -70,11 +70,48 @@ _skopeo_copy() {
|
||||
_complete_ "$options_with_args" "$boolean_options" "$transports"
|
||||
}
|
||||
|
||||
_skopeo_sync() {
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--dest
|
||||
--dest-authfile
|
||||
--dest-cert-
|
||||
--dest-creds
|
||||
--dest-registry-token string
|
||||
--format
|
||||
--retry-times
|
||||
--sign-by
|
||||
--src
|
||||
--src-authfile
|
||||
--src-cert-dir
|
||||
--src-creds
|
||||
--src-registry-token
|
||||
"
|
||||
|
||||
local boolean_options="
|
||||
--all
|
||||
--dest-no-creds
|
||||
--dest-tls-verify
|
||||
--remove-signatures
|
||||
--scoped
|
||||
--src-no-creds
|
||||
--src-tls-verify
|
||||
"
|
||||
|
||||
local transports
|
||||
transports="
|
||||
$(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
|
||||
"
|
||||
|
||||
_complete_ "$options_with_args" "$boolean_options" "$transports"
|
||||
}
|
||||
|
||||
_skopeo_inspect() {
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--creds
|
||||
--cert-dir
|
||||
--format
|
||||
--retry-times
|
||||
--registry-token
|
||||
"
|
||||
@@ -195,7 +232,7 @@ _skopeo_logout() {
|
||||
}
|
||||
|
||||
_skopeo_skopeo() {
|
||||
# XXX: Changes here need to be refleceted in the manually expanded
|
||||
# XXX: Changes here need to be reflected in the manually expanded
|
||||
# string in the `case` statement below as well.
|
||||
local options_with_args="
|
||||
--policy
|
||||
@@ -229,7 +266,7 @@ _skopeo_skopeo() {
|
||||
)
|
||||
|
||||
case "$prev" in
|
||||
# XXX: Changes here need to be refleceted in $options_with_args as well.
|
||||
# XXX: Changes here need to be reflected in $options_with_args as well.
|
||||
--policy|--registries.d|--override-arch|--override-os|--override-variant|--command-timeout)
|
||||
return
|
||||
;;
|
||||
@@ -259,7 +296,7 @@ _cli_bash_autocomplete() {
|
||||
local counter=1
|
||||
while [ $counter -lt "$cword" ]; do
|
||||
case "${words[$counter]}" in
|
||||
skopeo|copy|inspect|delete|manifest-digest|standalone-sign|standalone-verify|help|h|list-repository-tags)
|
||||
skopeo|copy|sync|inspect|delete|manifest-digest|standalone-sign|standalone-verify|help|h|list-repository-tags)
|
||||
command="${words[$counter]//-/_}"
|
||||
cpos=$counter
|
||||
(( cpos++ ))
|
||||
|
||||
contrib/cirrus/runner.sh | 102 | Executable file
@@ -0,0 +1,102 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script is intended to be executed by automation or humans
|
||||
# under a hack/get_ci_vm.sh context. Use under any other circumstances
|
||||
# is unlikely to function.
|
||||
|
||||
set -e
|
||||
|
||||
_EOL=20250501
|
||||
if [[ $(date +%Y%m%d) -ge $_EOL ]]; then
|
||||
die "As of $_EOL this branch is probably
|
||||
no longer supported in RHEL 8.4, please
|
||||
confirm this with RHEL Program Management. If so:
|
||||
It should be removed from Cirrus-Cron,
|
||||
the .cirrus.yml file removed, and
|
||||
the VM images (manually) unmarked
|
||||
'permanent=true'"
|
||||
fi
|
||||
|
||||
if [[ -r "/etc/automation_environment" ]]; then
|
||||
source /etc/automation_environment
|
||||
source $AUTOMATION_LIB_PATH/common_lib.sh
|
||||
else
|
||||
(
|
||||
echo "WARNING: It does not appear that containers/automation was installed."
|
||||
echo " Functionality of most of ${BASH_SOURCE[0]} will be negatively"
|
||||
echo " impacted."
|
||||
) > /dev/stderr
|
||||
fi
|
||||
|
||||
OS_RELEASE_ID="$(source /etc/os-release; echo $ID)"
|
||||
# GCE image-name compatible string representation of distribution _major_ version
|
||||
OS_RELEASE_VER="$(source /etc/os-release; echo $VERSION_ID | tr -d '.')"
|
||||
# Combined to ease some usage
|
||||
OS_REL_VER="${OS_RELEASE_ID}-${OS_RELEASE_VER}"
|
||||
|
||||
export "PATH=$PATH:$GOPATH/bin"
|
||||
|
||||
podmanmake() {
|
||||
req_env_vars GOPATH SKOPEO_PATH SKOPEO_CI_CONTAINER_FQIN
|
||||
warn "Accumulated technical-debt requires execution inside a --privileged container. This is very likely hiding bugs!"
|
||||
showrun podman run -it --rm --privileged \
|
||||
-e GOPATH=$GOPATH \
|
||||
-v $GOPATH:$GOPATH:Z \
|
||||
-w $SKOPEO_PATH \
|
||||
$SKOPEO_CI_CONTAINER_FQIN \
|
||||
make "$@"
|
||||
}
|
||||
|
||||
_run_setup() {
|
||||
if [[ "$OS_RELEASE_ID" == "fedora" ]]; then
|
||||
# This is required as part of the standard Fedora VM setup
|
||||
growpart /dev/sda 1
|
||||
resize2fs /dev/sda1
|
||||
|
||||
# VMs come with the distro-provided skopeo pre-installed
|
||||
dnf erase -y skopeo
|
||||
else
|
||||
die "Unknown/unsupported distro. $OS_REL_VER"
|
||||
fi
|
||||
}
|
||||
|
||||
_run_vendor() {
|
||||
podmanmake vendor BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
_run_build() {
|
||||
podmanmake bin/skopeo BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
_run_validate() {
|
||||
podmanmake validate-local BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
_run_unit() {
|
||||
podmanmake test-unit-local BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
_run_integration() {
|
||||
podmanmake test-integration-local BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
_run_system() {
|
||||
# Ensure we start with a clean-slate
|
||||
podman system reset --force
|
||||
# Executes with containers required for testing.
|
||||
showrun make test-system-local BUILDTAGS="$BUILDTAGS"
|
||||
}
|
||||
|
||||
req_env_vars SKOPEO_PATH BUILDTAGS
|
||||
|
||||
handler="_run_${1}"
|
||||
if [ "$(type -t $handler)" != "function" ]; then
|
||||
die "Unknown/Unsupported command-line argument '$1'"
|
||||
fi
|
||||
|
||||
msg "************************************************************"
|
||||
msg "Runner executing $1 on $OS_REL_VER"
|
||||
msg "************************************************************"
|
||||
|
||||
cd "$SKOPEO_PATH"
|
||||
$handler
|
||||
@@ -16,6 +16,13 @@ using the latest Fedora and then Skopeo is installed into them:
|
||||
* quay.io/skopeo/upstream - This image is built using the latest code found in this GitHub repository. When someone creates a commit and pushes it, the image is rebuilt. Because of that, the image changes frequently and is not guaranteed to be stable. Built with skopeoimage/upstream/Dockerfile.
|
||||
* quay.io/skopeo/testing - This image is built using the latest version of Skopeo that is or was in updates testing for Fedora. At times this may be the same as the stable image. This container image will primarily be used by the development teams for verification testing when a new package is created. Built with skopeoimage/testing/Dockerfile.
|
||||
|
||||
## Multiarch images
|
||||
|
||||
Multiarch images are available for Skopeo upstream and stable versions. Supported architectures are `amd64`, `s390x`, `ppc64le`.
|
||||
Available images are `quay.io/skopeo/upstream:master`, `quay.io/skopeo/stable:v1.2.0`, `quay.io/containers/skopeo:v1.2.0`.
|
||||
|
||||
Images can be used the same way as in the single-architecture case; no extra setup is required. For samples, see the next chapter.
|
||||
|
||||
## Sample Usage
|
||||
|
||||
Although not required, it is suggested that [Podman](https://github.com/containers/podman) be used with these container images.
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
# This image can be used to create a secured container
|
||||
# that runs safely with privileges within the container.
|
||||
#
|
||||
FROM registry.fedoraproject.org/fedora:32
|
||||
FROM registry.fedoraproject.org/fedora:33
|
||||
|
||||
# Don't include container-selinux and remove
|
||||
# directories used by yum that are just taking
|
||||
@@ -27,7 +27,7 @@ RUN echo skopeo:100000:65536 > /etc/subuid
|
||||
RUN echo skopeo:100000:65536 > /etc/subgid
|
||||
|
||||
# Point to the Authorization file
|
||||
ENV REGISTRY_AUTH_FILE=/auth.json
|
||||
ENV REGISTRY_AUTH_FILE=/tmp/auth.json
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["/usr/bin/skopeo"]
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
# This image can be used to create a secured container
|
||||
# that runs safely with privileges within the container.
|
||||
#
|
||||
FROM registry.fedoraproject.org/fedora:32
|
||||
FROM registry.fedoraproject.org/fedora:33
|
||||
|
||||
# Don't include container-selinux and remove
|
||||
# directories used by yum that are just taking
|
||||
@@ -28,7 +28,7 @@ RUN echo skopeo:100000:65536 > /etc/subuid
|
||||
RUN echo skopeo:100000:65536 > /etc/subgid
|
||||
|
||||
# Point to the Authorization file
|
||||
ENV REGISTRY_AUTH_FILE=/auth.json
|
||||
ENV REGISTRY_AUTH_FILE=/tmp/auth.json
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["/usr/bin/skopeo"]
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
# This image can be used to create a secured container
|
||||
# that runs safely with privileges within the container.
|
||||
#
|
||||
FROM registry.fedoraproject.org/fedora:32
|
||||
FROM registry.fedoraproject.org/fedora:33
|
||||
|
||||
# Don't include container-selinux and remove
|
||||
# directories used by yum that are just taking
|
||||
@@ -30,7 +30,7 @@ git clone https://github.com/containers/skopeo /root/skopeo/src/github.com/conta
|
||||
export GOPATH=/root/skopeo; \
|
||||
cd /root/skopeo/src/github.com/containers/skopeo; \
|
||||
make bin/skopeo;\
|
||||
make install;\
|
||||
make PREFIX=/usr install;\
|
||||
rm -rf /root/skopeo/*; \
|
||||
yum -y remove git golang go-md2man make; \
|
||||
yum -y clean all; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*
|
||||
@@ -48,7 +48,7 @@ RUN echo skopeo:100000:65536 > /etc/subuid
|
||||
RUN echo skopeo:100000:65536 > /etc/subgid
|
||||
|
||||
# Point to the Authorization file
|
||||
ENV REGISTRY_AUTH_FILE=/auth.json
|
||||
ENV REGISTRY_AUTH_FILE=/tmp/auth.json
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["/usr/bin/skopeo"]
|
||||
|
||||
@@ -42,6 +42,10 @@ Path of the authentication file for the source registry. Uses path given by `--a
|
||||
|
||||
Path of the authentication file for the destination registry. Uses path given by `--authfile`, if not provided.
|
||||
|
||||
**--digestfile** _path_
|
||||
|
||||
After copying the image, write the digest of the resulting image to the file.
|
||||
|
||||
**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)
|
||||
|
||||
**--quiet, -q** suppress output information when copying images
|
||||
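The new **--digestfile** option above is exercised by `TestCopyWithDigestfileOutput` later in this diff. A minimal sketch of the mechanism, assuming the digest of the manifest returned by `copy.Image` is what gets written out (`copyWithDigestfile` is a hypothetical helper, not skopeo's code):

```go
package digestfile

import (
	"context"
	"io/ioutil"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
)

// copyWithDigestfile copies src to dest and writes the digest of the copied
// manifest to digestPath, roughly what `skopeo copy --digestfile` does.
func copyWithDigestfile(ctx context.Context, pc *signature.PolicyContext,
	dest, src types.ImageReference, opts *copy.Options, digestPath string) error {
	manifestBytes, err := copy.Image(ctx, pc, dest, src, opts)
	if err != nil {
		return err
	}
	d, err := manifest.Digest(manifestBytes)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(digestPath, []byte(d.String()), 0644)
}
```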
|
||||
@@ -1,7 +1,7 @@
|
||||
% skopeo-delete(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-delete - Mark _image-name_ for deletion.
|
||||
skopeo\-delete - Mark the _image-name_ for later deletion by the registry's garbage collector.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo delete** _image-name_
|
||||
|
||||
@@ -1,41 +1,61 @@
|
||||
% skopeo-inspect(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-inspect - Return low-level information about _image-name_ in a registry
|
||||
skopeo\-inspect - Return low-level information about _image-name_ in a registry.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo inspect** [**--raw**] [**--config**] _image-name_
|
||||
**skopeo inspect** [*options*] _image-name_
|
||||
|
||||
## DESCRIPTION
|
||||
|
||||
Return low-level information about _image-name_ in a registry
|
||||
|
||||
**--raw** output raw manifest, default is to format in JSON
|
||||
_image-name_ name of image to retrieve information about
|
||||
|
||||
_image-name_ name of image to retrieve information about
|
||||
## OPTIONS
|
||||
|
||||
**--config** output configuration in OCI format, default is to format in JSON
|
||||
**--authfile** _path_
|
||||
|
||||
_image-name_ name of image to retrieve configuration for
|
||||
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--config** **--raw** output configuration in raw format
|
||||
**--cert-dir** _path_
|
||||
|
||||
_image-name_ name of image to retrieve configuration for
|
||||
Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.
|
||||
|
||||
**--authfile** _path_
|
||||
**--config**
|
||||
|
||||
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
Output configuration in OCI format; the default is to format output in JSON.
|
||||
|
||||
**--creds** _username[:password]_ for accessing the registry.
|
||||
**--creds** _username[:password]_
|
||||
|
||||
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.
|
||||
Username and password for accessing the registry.
|
||||
|
||||
**--retry-times** the number of times to retry, retry wait time will be exponentially increased based on the number of failed attempts.
|
||||
**--format**, **-f**=*format*
|
||||
|
||||
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).
|
||||
Format the output using the given Go template.
|
||||
The keys of the returned JSON can be used as the values for the --format flag (see examples below).
|
||||
|
||||
**--no-creds** _bool-value_ Access the registry anonymously.
|
||||
**--no-creds**
|
||||
|
||||
**--registry-token** _Bearer token_ for accessing the registry.
|
||||
Access the registry anonymously.
|
||||
|
||||
**--raw**
|
||||
|
||||
Output the raw manifest or config data, depending on the --config option.
The --format option is not supported with the --raw option.
|
||||
|
||||
**--registry-token** _Bearer token_
|
||||
|
||||
Registry token for accessing the registry.
|
||||
|
||||
**--retry-times**
|
||||
|
||||
The number of times to retry; retry wait time will be exponentially increased based on the number of failed attempts.
|
||||
|
||||
**--tls-verify**
|
||||
|
||||
Require HTTPS and verify certificates when talking to container registries (defaults to true).
|
||||
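The `--format` option above takes a Go template over the keys of the inspect JSON. Purely as an illustration of what that means (the struct below is a tiny stand-in, not skopeo's real inspect output type):

```go
package main

import (
	"os"
	"text/template"
)

// inspectOutput stands in for a few keys of the `skopeo inspect` JSON output.
type inspectOutput struct {
	Name         string
	Architecture string
	Os           string
}

func main() {
	out := inspectOutput{Name: "docker.io/library/fedora", Architecture: "amd64", Os: "linux"}
	// The same template syntax users pass on the command line, e.g. --format "{{ .Architecture }}".
	tmpl := template.Must(template.New("format").Parse("{{ .Architecture }}\n"))
	if err := tmpl.Execute(os.Stdout, out); err != nil {
		panic(err)
	}
}
```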
|
||||
## EXAMPLES
|
||||
|
||||
@@ -46,14 +66,14 @@ $ skopeo inspect docker://docker.io/fedora
|
||||
"Name": "docker.io/library/fedora",
|
||||
"Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
|
||||
"RepoTags": [
|
||||
"20",
|
||||
"21",
|
||||
"22",
|
||||
"23",
|
||||
"24",
|
||||
"heisenbug",
|
||||
"latest",
|
||||
"rawhide"
|
||||
"20",
|
||||
"21",
|
||||
"22",
|
||||
"23",
|
||||
"24",
|
||||
"heisenbug",
|
||||
"latest",
|
||||
"rawhide"
|
||||
],
|
||||
"Created": "2016-06-20T19:33:43.220526898Z",
|
||||
"DockerVersion": "1.10.3",
|
||||
@@ -61,15 +81,24 @@ $ skopeo inspect docker://docker.io/fedora
|
||||
"Architecture": "amd64",
|
||||
"Os": "linux",
|
||||
"Layers": [
|
||||
"sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
|
||||
"sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
$ /bin/skopeo inspect --config docker://registry.fedoraproject.org/fedora --format "{{ .Architecture }}"
|
||||
amd64
|
||||
```
|
||||
|
||||
```
|
||||
$ /bin/skopeo inspect --format '{{ .Env }}' docker://registry.access.redhat.com/ubi8
|
||||
[PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin container=oci]
|
||||
```
|
||||
|
||||
# SEE ALSO
|
||||
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)
|
||||
|
||||
## AUTHORS
|
||||
|
||||
Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
% skopeo-list-tags(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-list\-tags - Return a list of tags the transport-specific image repository
|
||||
skopeo\-list\-tags - Return a list of tags for the transport-specific image repository.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo list-tags** _repository-name_
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
% skopeo-login(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-login - Login to a container registry
|
||||
skopeo\-login - Login to a container registry.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo login** [*options*] *registry*
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
% skopeo-logout(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-logout - Logout of a container registry
|
||||
skopeo\-logout - Logout of a container registry.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo logout** [*options*] *registry*
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
% skopeo-manifest-digest(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-manifest\-digest -Compute a manifest digest of manifest-file and write it to standard output.
|
||||
skopeo\-manifest\-digest - Compute a manifest digest for a manifest-file and write it to standard output.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo manifest-digest** _manifest-file_
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
% skopeo-standalone-sign(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-standalone-sign - Simple Sign an image
|
||||
skopeo\-standalone-sign - Debugging tool - Publish and sign an image in one step.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
% skopeo-standalone-verify(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-standalone\-verify - Verify an image signature
|
||||
skopeo\-standalone\-verify - Verify an image signature.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_
|
||||
|
||||
@@ -32,6 +32,11 @@ When the `--scoped` option is specified, images are prefixed with the source ima
|
||||
name can be stored at _destination_.
|
||||
|
||||
## OPTIONS
|
||||
**--all**
|
||||
If one of the images in __src__ refers to a list of images, instead of copying just the image which matches the current OS and
|
||||
architecture (subject to the use of the global --override-os, --override-arch and --override-variant options), attempt to copy all of
|
||||
the images in the list, and the list itself.
|
||||
|
||||
**--authfile** _path_
|
||||
|
||||
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
|
||||
@@ -49,6 +54,8 @@ Path of the authentication file for the destination registry. Uses path given by
|
||||
|
||||
**--dest** _transport_ Destination transport.
|
||||
|
||||
**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source).
|
||||
|
||||
**--scoped** Prefix images with the source image path, so that multiple images with the same name can be stored at _destination_.
|
||||
|
||||
**--remove-signatures** Do not copy signatures, if any, from _source-image_. This is necessary when copying a signed image to a destination which does not support signatures.
|
||||
@@ -147,6 +154,7 @@ registry.example.com:
|
||||
redis:
|
||||
- "1.0"
|
||||
- "2.0"
|
||||
- "sha256:0000000000000000000000000000000011111111111111111111111111111111"
|
||||
images-by-tag-regex:
|
||||
nginx: ^1\.13\.[12]-alpine-perl$
|
||||
credentials:
|
||||
@@ -166,14 +174,14 @@ skopeo sync --src yaml --dest docker sync.yml my-registry.local.lan/repo/
|
||||
```
|
||||
This will copy the following images:
|
||||
- Repository `registry.example.com/busybox`: all images, as no tags are specified.
|
||||
- Repository `registry.example.com/redis`: images tagged "1.0" and "2.0".
|
||||
- Repository `registry.example.com/redis`: images tagged "1.0" and "2.0" along with image with digest "sha256:0000000000000000000000000000000011111111111111111111111111111111".
|
||||
- Repository `registry.example.com/nginx`: images tagged "1.13.1-alpine-perl" and "1.13.2-alpine-perl".
|
||||
- Repository `quay.io/coreos/etcd`: images tagged "latest".
|
||||
|
||||
For the registry `registry.example.com`, the "john"/"this is a secret" credentials are used, with server TLS certificates located at `/home/john/certs`.
|
||||
|
||||
TLS verification is normally enabled, and it can be disabled by setting `tls-verify` to `false`.
|
||||
In the above example, TLS verification is enabled for `reigstry.example.com`, while is
|
||||
In the above example, TLS verification is enabled for `registry.example.com`, while it is
|
||||
disabled for `quay.io`.
|
||||
|
||||
## SEE ALSO
|
||||
|
||||
go.mod | 24
@@ -3,25 +3,23 @@ module github.com/containers/skopeo
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/containers/common v0.22.0
|
||||
github.com/containers/image/v5 v5.6.0
|
||||
github.com/containers/ocicrypt v1.0.3
|
||||
github.com/containers/storage v1.23.5
|
||||
github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f
|
||||
github.com/dsnet/compress v0.0.1 // indirect
|
||||
github.com/containers/common v0.38.12
|
||||
github.com/containers/image/v5 v5.12.0
|
||||
github.com/containers/ocicrypt v1.1.1
|
||||
github.com/containers/storage v1.31.3
|
||||
github.com/docker/docker v20.10.6+incompatible
|
||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
||||
github.com/go-check/check v0.0.0-20180628173108-788fd7840127
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
|
||||
github.com/opencontainers/image-tools v0.0.0-20170926011501-6d941547fa1d
|
||||
github.com/opencontainers/runc v1.0.0-rc92 // indirect
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/russross/blackfriday v2.0.0+incompatible // indirect
|
||||
github.com/sirupsen/logrus v1.6.0
|
||||
github.com/spf13/cobra v1.0.0
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/spf13/cobra v1.1.3
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.6.1
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
|
||||
go4.org v0.0.0-20190218023631-ce4c26f7be8e // indirect
|
||||
golang.org/x/text v0.3.3 // indirect
|
||||
gopkg.in/yaml.v2 v2.3.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
hack/get_ci_vm.sh | 61 | Executable file
@@ -0,0 +1,61 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
#
|
||||
# For help and usage information, simply execute the script w/o any arguments.
|
||||
#
|
||||
# This script is intended to be run by Red Hat skopeo developers who need
|
||||
# to debug problems specifically related to Cirrus-CI automated testing.
|
||||
# It requires that you have been granted prior access to create VMs in
|
||||
# google-cloud. For non-Red Hat contributors, VMs are available as-needed,
|
||||
# with supervision upon request.
|
||||
|
||||
set -e
|
||||
|
||||
SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
|
||||
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
|
||||
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
|
||||
|
||||
# Help detect if we were called by get_ci_vm container
|
||||
GET_CI_VM="${GET_CI_VM:-0}"
|
||||
in_get_ci_vm() {
|
||||
if ((GET_CI_VM==0)); then
|
||||
echo "Error: $1 is not intended for use in this context"
|
||||
exit 2
|
||||
fi
|
||||
}
|
||||
|
||||
# get_ci_vm APIv1 container entrypoint calls into this script
|
||||
# to obtain required repo. specific configuration options.
|
||||
if [[ "$1" == "--config" ]]; then
|
||||
in_get_ci_vm "$1"
|
||||
cat <<EOF
|
||||
DESTDIR="/var/tmp/go/src/github.com/containers/skopeo"
|
||||
UPSTREAM_REPO="https://github.com/containers/skopeo.git"
|
||||
GCLOUD_PROJECT="skopeo"
|
||||
GCLOUD_IMGPROJECT="libpod-218412"
|
||||
GCLOUD_CFG="skopeo"
|
||||
GCLOUD_ZONE="${GCLOUD_ZONE:-us-central1-f}"
|
||||
GCLOUD_CPUS="2"
|
||||
GCLOUD_MEMORY="4Gb"
|
||||
GCLOUD_DISK="200"
|
||||
EOF
|
||||
elif [[ "$1" == "--setup" ]]; then
|
||||
in_get_ci_vm "$1"
|
||||
# get_ci_vm container entrypoint calls us with this option on the
|
||||
# Cirrus-CI environment instance, to perform repo.-specific setup.
|
||||
echo "+ Executing setup" > /dev/stderr
|
||||
${GOSRC}/${SCRIPT_BASE}/runner.sh setup
|
||||
else
|
||||
# Create and access VM for specified Cirrus-CI task
|
||||
mkdir -p $HOME/.config/gcloud/ssh
|
||||
podman run -it --rm \
|
||||
--tz=local \
|
||||
-e NAME="$USER" \
|
||||
-e SRCDIR=/src \
|
||||
-e GCLOUD_ZONE="$GCLOUD_ZONE" \
|
||||
-e DEBUG="${DEBUG:-0}" \
|
||||
-v $REPO_DIRPATH:/src:O \
|
||||
-v $HOME/.config/gcloud:/root/.config/gcloud:z \
|
||||
-v $HOME/.config/gcloud/ssh:/root/.ssh:z \
|
||||
quay.io/libpod/get_ci_vm:latest "$@"
|
||||
fi
|
||||
hack/make.sh | 11
@@ -25,12 +25,8 @@ export MAKEDIR="$SCRIPTDIR/make"
|
||||
|
||||
# We're a nice, sexy, little shell script, and people might try to run us;
|
||||
# but really, they shouldn't. We want to be in a container!
|
||||
inContainer="AssumeSoInitially"
|
||||
if [ "$PWD" != "/go/src/$SKOPEO_PKG" ]; then
|
||||
unset inContainer
|
||||
fi
|
||||
|
||||
if [ -z "$inContainer" ]; then
|
||||
# The magic value is defined inside our Dockerfile.
|
||||
if [[ "$container_magic" != "85531765-346b-4316-bdb8-358e4cca9e5d" ]]; then
|
||||
{
|
||||
echo "# WARNING! I don't seem to be running in a Docker container."
|
||||
echo "# The result of this command might be an incorrect build, and will not be"
|
||||
@@ -39,6 +35,9 @@ if [ -z "$inContainer" ]; then
|
||||
echo "# Try this instead: make all"
|
||||
echo "#"
|
||||
} >&2
|
||||
else
|
||||
echo "# I appear to be running inside my designated container image, good!"
|
||||
export SKOPEO_CONTAINER_TESTS=1
|
||||
fi
|
||||
|
||||
echo
|
||||
|
||||
@@ -5,7 +5,7 @@ if [ -z "$VALIDATE_UPSTREAM" ]; then
|
||||
# are running more than one validate bundlescript
|
||||
|
||||
VALIDATE_REPO='https://github.com/containers/skopeo.git'
|
||||
VALIDATE_BRANCH='master'
|
||||
VALIDATE_BRANCH='main'
|
||||
|
||||
if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
|
||||
VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
|
||||
|
||||
@@ -9,6 +9,6 @@ bundle_test_integration() {
|
||||
# subshell so that we can export PATH without breaking other things
|
||||
(
|
||||
make bin/skopeo ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
|
||||
make install
|
||||
make PREFIX=/usr install
|
||||
bundle_test_integration
|
||||
) 2>&1
|
||||
|
||||
@@ -12,7 +12,7 @@ sed -i \
|
||||
|
||||
# Build skopeo, install into /usr/bin
|
||||
make bin/skopeo ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
|
||||
make install
|
||||
make PREFIX=/usr install
|
||||
|
||||
# Run tests
|
||||
SKOPEO_BINARY=/usr/bin/skopeo bats --tap systemtest
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
export GOPATH=$(pwd)/_gopath
|
||||
export PATH=$GOPATH/bin:$PATH
|
||||
|
||||
_containers="${GOPATH}/src/github.com/containers"
|
||||
mkdir -vp ${_containers}
|
||||
ln -vsf $(pwd) ${_containers}/skopeo
|
||||
|
||||
go version
|
||||
GO111MODULE=off go get -u github.com/cpuguy83/go-md2man golang.org/x/lint/golint
|
||||
|
||||
cd ${_containers}/skopeo
|
||||
make validate-local test-unit-local bin/skopeo
|
||||
sudo make install
|
||||
skopeo -v
|
||||
install.md | 171
@@ -1,76 +1,123 @@
|
||||
# Installing from packages
|
||||
|
||||
`skopeo` may already be packaged in your distribution, for example on
|
||||
RHEL/CentOS ≥ 8 or Fedora you can install it using:
|
||||
## Distribution Packages
|
||||
`skopeo` may already be packaged in your distribution.
|
||||
|
||||
### Fedora
|
||||
|
||||
```sh
|
||||
$ sudo dnf install skopeo
|
||||
sudo dnf -y install skopeo
|
||||
```
|
||||
|
||||
on RHEL/CentOS ≤ 7.x:
|
||||
### RHEL/CentOS ≥ 8 and CentOS Stream
|
||||
|
||||
```sh
|
||||
$ sudo yum install skopeo
|
||||
sudo dnf -y install skopeo
|
||||
```
|
||||
|
||||
for openSUSE:
|
||||
Newer Skopeo releases may be available on the repositories provided by the
|
||||
Kubic project. Beware, these may not be suitable for production environments.
|
||||
|
||||
on CentOS 8:
|
||||
|
||||
```sh
|
||||
$ sudo zypper install skopeo
|
||||
sudo dnf -y module disable container-tools
|
||||
sudo dnf -y install 'dnf-command(copr)'
|
||||
sudo dnf -y copr enable rhcontainerbot/container-selinux
|
||||
sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8/devel:kubic:libcontainers:stable.repo
|
||||
sudo dnf -y install skopeo
|
||||
```
|
||||
|
||||
on alpine:
|
||||
on CentOS 8 Stream:
|
||||
|
||||
```sh
|
||||
$ sudo apk add skopeo
|
||||
sudo dnf -y module disable container-tools
|
||||
sudo dnf -y install 'dnf-command(copr)'
|
||||
sudo dnf -y copr enable rhcontainerbot/container-selinux
|
||||
sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8_Stream/devel:kubic:libcontainers:stable.repo
|
||||
sudo dnf -y install skopeo
|
||||
```
|
||||
|
||||
on macOS:
|
||||
### RHEL/CentOS ≤ 7.x
|
||||
|
||||
```sh
|
||||
$ brew install skopeo
|
||||
sudo yum -y install skopeo
|
||||
```
|
||||
|
||||
Debian (10 and newer including Raspbian) and Ubuntu (18.04 and newer): Packages
|
||||
are available via the [Kubic][0] project repositories:
|
||||
### openSUSE
|
||||
|
||||
[0]: https://build.opensuse.org/project/show/devel:kubic:libcontainers:stable
|
||||
```sh
|
||||
sudo zypper install skopeo
|
||||
```
|
||||
|
||||
### Alpine
|
||||
|
||||
```sh
|
||||
sudo apk add skopeo
|
||||
```
|
||||
|
||||
### macOS
|
||||
|
||||
```sh
|
||||
brew install skopeo
|
||||
```
|
||||
|
||||
### Nix / NixOS
|
||||
```sh
|
||||
$ nix-env -i skopeo
|
||||
```
|
||||
|
||||
### Debian
|
||||
|
||||
The skopeo package is available in
|
||||
the [Bullseye (testing) branch](https://packages.debian.org/bullseye/skopeo), which
|
||||
will be the next stable release (Debian 11) as well as Debian Unstable/Sid.
|
||||
|
||||
```bash
|
||||
# Debian Unstable/Sid:
|
||||
$ echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Unstable/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
|
||||
$ wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_Unstable/Release.key -O- | sudo apt-key add -
|
||||
# Debian Testing/Bullseye or Unstable/Sid
|
||||
sudo apt-get update
|
||||
sudo apt-get -y install skopeo
|
||||
```
|
||||
|
||||
```bash
|
||||
# Debian Testing:
|
||||
$ echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Testing/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
|
||||
$ wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_Testing/Release.key -O- | sudo apt-key add -
|
||||
```
|
||||
### Raspberry Pi OS arm64 (beta)
|
||||
|
||||
Raspberry Pi OS uses the standard Debian repositories,
so it is fully compatible with Debian's arm64 repository.
|
||||
You can simply follow the [steps for Debian](#debian) to install Skopeo.
|
||||
|
||||
|
||||
### Ubuntu
|
||||
|
||||
The skopeo package is available in the official repositories for Ubuntu 20.10
|
||||
and newer.
|
||||
|
||||
```bash
|
||||
# Debian 10:
|
||||
$ echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
|
||||
$ wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_10/Release.key -O- | sudo apt-key add -
|
||||
# Ubuntu 20.10 and newer
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install skopeo
|
||||
```
|
||||
|
||||
```bash
|
||||
# Raspbian 10:
|
||||
$ echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Raspbian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
|
||||
$ wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Raspbian_10/Release.key -O- | sudo apt-key add -
|
||||
```
|
||||
If you would prefer newer (though not as well-tested) packages,
the [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/skopeo)
provides packages for active Ubuntu releases 20.04 and newer (it should also work with direct derivatives like Pop!\_OS).
Check out the [Kubic project page](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/skopeo)
for a list of supported Ubuntu version and architecture combinations.
**NOTE:** The command `sudo apt-get -y upgrade` may be required in some cases if Skopeo cannot be installed without it.
The build sources for the Kubic packages can be found [here](https://gitlab.com/rhcontainerbot/skopeo/-/tree/debian/debian).
|
||||
|
||||
CAUTION: On Ubuntu 20.10 and newer, we highly recommend you use Buildah, Podman and Skopeo ONLY from EITHER the Kubic repo
|
||||
OR the official Ubuntu repos. Mixing and matching may lead to unpredictable situations including installation conflicts.
|
||||
|
||||
```bash
|
||||
# Ubuntu (18.04, 19.04 and 19.10):
|
||||
$ . /etc/os-release
|
||||
$ sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/x${NAME}_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"
|
||||
$ wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/x${NAME}_${VERSION_ID}/Release.key -O- | sudo apt-key add -
|
||||
. /etc/os-release
|
||||
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
|
||||
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add -
|
||||
sudo apt-get update
|
||||
sudo apt-get -y upgrade
|
||||
sudo apt-get -y install skopeo
|
||||
```
|
||||
|
||||
```bash
|
||||
$ sudo apt-get update -qq
|
||||
$ sudo apt-get install skopeo
|
||||
```
|
||||
|
||||
Otherwise, read on for building and installing it from source:
|
||||
|
||||
@@ -79,6 +126,8 @@ To build the `skopeo` binary you need at least Go 1.12.
|
||||
There are two ways to build skopeo: in a container, or locally without a
|
||||
container. Choose the one which better matches your needs and environment.
|
||||
|
||||
## Building from Source
|
||||
|
||||
### Building without a container
|
||||
|
||||
Building without a container requires a bit more manual work and setup in your
|
||||
@@ -92,43 +141,32 @@ Install the necessary dependencies:
|
||||
|
||||
```bash
|
||||
# Fedora:
|
||||
$ sudo dnf install gpgme-devel libassuan-devel btrfs-progs-devel device-mapper-devel
|
||||
sudo dnf install gpgme-devel libassuan-devel btrfs-progs-devel device-mapper-devel
|
||||
```
|
||||
|
||||
```bash
|
||||
# Ubuntu (`libbtrfs-dev` requires Ubuntu 18.10 and above):
|
||||
$ sudo apt install libgpgme-dev libassuan-dev libbtrfs-dev libdevmapper-dev
|
||||
sudo apt install libgpgme-dev libassuan-dev libbtrfs-dev libdevmapper-dev
|
||||
```
|
||||
|
||||
```bash
|
||||
# macOS:
|
||||
$ brew install gpgme
|
||||
brew install gpgme
|
||||
```
|
||||
|
||||
```bash
|
||||
# openSUSE:
|
||||
$ sudo zypper install libgpgme-devel device-mapper-devel libbtrfs-devel glib2-devel
|
||||
sudo zypper install libgpgme-devel device-mapper-devel libbtrfs-devel glib2-devel
|
||||
```
|
||||
|
||||
Make sure to clone this repository in your `GOPATH` - otherwise compilation fails.
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/containers/skopeo $GOPATH/src/github.com/containers/skopeo
|
||||
$ cd $GOPATH/src/github.com/containers/skopeo && make bin/skopeo
|
||||
git clone https://github.com/containers/skopeo $GOPATH/src/github.com/containers/skopeo
|
||||
cd $GOPATH/src/github.com/containers/skopeo && make bin/skopeo
|
||||
```
|
||||
|
||||
### Building in a container
|
||||
|
||||
Building in a container is simpler, but more restrictive:
|
||||
|
||||
- It requires the `podman` command and the ability to run Linux containers
|
||||
- The created executable is a Linux executable, and depends on dynamic libraries
|
||||
which may only be available in a container of a similar Linux
|
||||
distribution.
|
||||
|
||||
```bash
|
||||
$ make binary # Or (make all) to also build documentation, see below.
|
||||
```
|
||||
By default the `make` command (make all) will build bin/skopeo and the documentation locally.
|
||||
|
||||
### Building documentation
|
||||
|
||||
@@ -136,18 +174,31 @@ To build the manual you will need go-md2man.
|
||||
|
||||
```bash
|
||||
# Debian:
|
||||
$ sudo apt-get install go-md2man
|
||||
sudo apt-get install go-md2man
|
||||
```
|
||||
|
||||
```
|
||||
# Fedora:
|
||||
$ sudo dnf install go-md2man
|
||||
sudo dnf install go-md2man
|
||||
```
|
||||
|
||||
Then
|
||||
|
||||
```bash
|
||||
$ make docs
|
||||
make docs
|
||||
```
|
||||
|
||||
### Building in a container
|
||||
|
||||
Building in a container is simpler, but more restrictive:
|
||||
|
||||
- It requires the `podman` command and the ability to run Linux containers.
|
||||
- The created executable is a Linux executable, and depends on dynamic libraries
|
||||
which may only be available in a container of a similar Linux
|
||||
distribution.
|
||||
|
||||
```bash
|
||||
$ make binary
|
||||
```
|
||||
|
||||
### Installation
|
||||
@@ -155,5 +206,5 @@ $ make docs
|
||||
Finally, after the binary and documentation is built:
|
||||
|
||||
```bash
|
||||
$ sudo make install
|
||||
sudo make install
|
||||
```
|
||||
|
||||
@@ -101,8 +101,8 @@ func (s *SkopeoSuite) TestCopyWithLocalAuth(c *check.C) {
|
||||
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
|
||||
// copy to private registry using local authentication
|
||||
imageName := fmt.Sprintf("docker://%s/busybox:mine", s.regV2WithAuth.url)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://docker.io/library/busybox:latest", imageName)
|
||||
// inspec from private registry
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", testFQIN+":latest", imageName)
|
||||
// inspect from private registry
|
||||
assertSkopeoSucceeds(c, "", "inspect", "--tls-verify=false", imageName)
|
||||
// logout from the registry
|
||||
wanted = fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url)
|
||||
|
||||
@@ -31,6 +31,8 @@ const (
|
||||
v2DockerRegistryURL = "localhost:5555" // Update also policy.json
|
||||
v2s1DockerRegistryURL = "localhost:5556"
|
||||
knownWindowsOnlyImage = "docker://mcr.microsoft.com/windows/nanoserver:1909"
|
||||
knownListImageRepo = "docker://registry.fedoraproject.org/fedora-minimal"
|
||||
knownListImage = knownListImageRepo + ":38"
|
||||
)
|
||||
|
||||
type CopySuite struct {
|
||||
@@ -99,14 +101,14 @@ func (s *CopySuite) TestCopyWithManifestList(c *check.C) {
|
||||
dir, err := ioutil.TempDir("", "copy-manifest-list")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:latest", "dir:"+dir)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir)
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyAllWithManifestList(c *check.C) {
|
||||
dir, err := ioutil.TempDir("", "copy-all-manifest-list")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "docker://estesp/busybox:latest", "dir:"+dir)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage, "dir:"+dir)
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyAllWithManifestListRoundTrip(c *check.C) {
|
||||
@@ -122,7 +124,7 @@ func (s *CopySuite) TestCopyAllWithManifestListRoundTrip(c *check.C) {
|
||||
dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "docker://estesp/busybox:latest", "oci:"+oci1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage, "oci:"+oci1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "oci:"+oci1, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "dir:"+dir1, "oci:"+oci2)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "oci:"+oci2, "dir:"+dir2)
|
||||
@@ -144,9 +146,9 @@ func (s *CopySuite) TestCopyAllWithManifestListConverge(c *check.C) {
|
||||
dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "docker://estesp/busybox:latest", "oci:"+oci1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage, "oci:"+oci1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "oci:"+oci1, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "--format", "oci", "docker://estesp/busybox:latest", "dir:"+dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "--format", "oci", knownListImage, "dir:"+dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "dir:"+dir2, "oci:"+oci2)
|
||||
assertDirImagesAreEqual(c, dir1, dir2)
|
||||
out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
|
||||
@@ -166,9 +168,9 @@ func (s *CopySuite) TestCopyWithManifestListConverge(c *check.C) {
|
||||
dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:latest", "oci:"+oci1)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImage, "oci:"+oci1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "oci:"+oci1, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--format", "oci", "docker://estesp/busybox:latest", "dir:"+dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--format", "oci", knownListImage, "dir:"+dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "dir:"+dir2, "oci:"+oci2)
|
||||
assertDirImagesAreEqual(c, dir1, dir2)
|
||||
out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
|
||||
@@ -180,7 +182,7 @@ func (s *CopySuite) TestCopyAllWithManifestListStorageFails(c *check.C) {
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(storage)
|
||||
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
|
||||
assertSkopeoFails(c, `.*destination transport .* does not support copying multiple images as a group.*`, "copy", "--all", "docker://estesp/busybox:latest", "containers-storage:"+storage+"test")
|
||||
assertSkopeoFails(c, `.*destination transport .* does not support copying multiple images as a group.*`, "copy", "--all", knownListImage, "containers-storage:"+storage+"test")
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyWithManifestListStorage(c *check.C) {
|
||||
@@ -194,8 +196,8 @@ func (s *CopySuite) TestCopyWithManifestListStorage(c *check.C) {
|
||||
dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:latest", "containers-storage:"+storage+"test")
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:latest", "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImage, "containers-storage:"+storage+"test")
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test", "dir:"+dir2)
|
||||
runDecompressDirs(c, "", dir1, dir2)
|
||||
assertDirImagesAreEqual(c, dir1, dir2)
|
||||
@@ -212,9 +214,9 @@ func (s *CopySuite) TestCopyWithManifestListStorageMultiple(c *check.C) {
|
||||
dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-multiple-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch", "amd64", "copy", "docker://estesp/busybox:latest", "containers-storage:"+storage+"test")
|
||||
assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", "docker://estesp/busybox:latest", "containers-storage:"+storage+"test")
|
||||
assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", "docker://estesp/busybox:latest", "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch", "amd64", "copy", knownListImage, "containers-storage:"+storage+"test")
|
||||
assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", knownListImage, "containers-storage:"+storage+"test")
|
||||
assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", knownListImage, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test", "dir:"+dir2)
|
||||
runDecompressDirs(c, "", dir1, dir2)
|
||||
assertDirImagesAreEqual(c, dir1, dir2)
|
||||
@@ -233,18 +235,33 @@ func (s *CopySuite) TestCopyWithManifestListDigest(c *check.C) {
|
||||
oci2, err := ioutil.TempDir("", "copy-manifest-list-digest-oci")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(oci2)
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
|
||||
manifestDigest, err := manifest.Digest([]byte(m))
|
||||
c.Assert(err, check.IsNil)
|
||||
digest := manifestDigest.String()
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox@"+digest, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "docker://estesp/busybox@"+digest, "dir:"+dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", knownListImageRepo+"@"+digest, "dir:"+dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", "dir:"+dir1, "oci:"+oci1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "dir:"+dir2, "oci:"+oci2)
|
||||
out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
|
||||
c.Assert(out, check.Equals, "")
|
||||
}
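
The digest used in this test is simply the digest of the raw manifest list bytes returned by "inspect --raw". A minimal sketch of computing it outside the test harness, reusing the manifest.Digest call the test itself makes; the image reference is only an example:

```go
package main

import (
	"fmt"
	"os/exec"

	"github.com/containers/image/v5/manifest"
)

func main() {
	// Fetch the raw, unconverted manifest, exactly as the tests do with "inspect --raw".
	raw, err := exec.Command("skopeo", "inspect", "--raw", "docker://registry.k8s.io/pause:latest").Output()
	if err != nil {
		panic(err)
	}
	// manifest.Digest computes the digest that "copy <repo>@<digest>" then resolves.
	d, err := manifest.Digest(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // e.g. sha256:...
}
```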
|
||||
|
||||
func (s *CopySuite) TestCopyWithDigestfileOutput(c *check.C) {
|
||||
tempdir, err := ioutil.TempDir("", "tempdir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tempdir)
|
||||
dir1, err := ioutil.TempDir("", "copy-manifest-list-digest-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir1)
|
||||
digestOutPath := filepath.Join(tempdir, "digest.txt")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--digestfile="+digestOutPath, knownListImage, "dir:"+dir1)
|
||||
readDigest, err := ioutil.ReadFile(digestOutPath)
|
||||
c.Assert(err, check.IsNil)
|
||||
_, err = digest.Parse(string(readDigest))
|
||||
c.Assert(err, check.IsNil)
|
||||
}
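
--digestfile writes the manifest digest of the copied image to the given path, and the test only asserts that the contents parse. A small sketch of a consumer of that file, assuming the same go-digest module the test imports; the digest.txt path is an example:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/opencontainers/go-digest"
)

// readCopyDigest reads a file written via "skopeo copy --digestfile=<path>"
// and validates its contents as a manifest digest.
func readCopyDigest(path string) (digest.Digest, error) {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	// Trimming is defensive; the test above only asserts that the contents parse.
	return digest.Parse(strings.TrimSpace(string(raw)))
}

func main() {
	d, err := readCopyDigest("digest.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Algorithm(), d)
}
```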
|
||||
|
||||
func (s *CopySuite) TestCopyWithManifestListStorageDigest(c *check.C) {
|
||||
storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest")
|
||||
c.Assert(err, check.IsNil)
|
||||
@@ -256,13 +273,13 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigest(c *check.C) {
|
||||
dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
|
||||
manifestDigest, err := manifest.Digest([]byte(m))
|
||||
c.Assert(err, check.IsNil)
|
||||
digest := manifestDigest.String()
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test@"+digest, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox@"+digest, "dir:"+dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "dir:"+dir2)
|
||||
runDecompressDirs(c, "", dir1, dir2)
|
||||
assertDirImagesAreEqual(c, dir1, dir2)
|
||||
}
|
||||
@@ -278,13 +295,13 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArches(c *check
|
||||
dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
|
||||
manifestDigest, err := manifest.Digest([]byte(m))
|
||||
c.Assert(err, check.IsNil)
|
||||
digest := manifestDigest.String()
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test@"+digest, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox@"+digest, "dir:"+dir2)
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "dir:"+dir2)
|
||||
runDecompressDirs(c, "", dir1, dir2)
|
||||
assertDirImagesAreEqual(c, dir1, dir2)
|
||||
}
|
||||
@@ -294,14 +311,14 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseLi
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(storage)
|
||||
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
|
||||
manifestDigest, err := manifest.Digest([]byte(m))
|
||||
c.Assert(err, check.IsNil)
|
||||
digest := manifestDigest.String()
|
||||
_, err = manifest.ListFromBlob([]byte(m), manifest.GuessMIMEType([]byte(m)))
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
@@ -316,7 +333,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUses
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(storage)
|
||||
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
|
||||
manifestDigest, err := manifest.Digest([]byte(m))
|
||||
c.Assert(err, check.IsNil)
|
||||
digest := manifestDigest.String()
|
||||
@@ -326,8 +343,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUses
|
||||
c.Assert(err, check.IsNil)
|
||||
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", "docker://estesp/busybox@"+arm64Instance.String(), "containers-storage:"+storage+"test@"+arm64Instance.String())
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+arm64Instance.String(), "containers-storage:"+storage+"test@"+arm64Instance.String())
|
||||
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
var image1 imgspecv1.Image
|
||||
err = json.Unmarshal([]byte(i1), &image1)
|
||||
@@ -352,7 +369,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUse
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(storage)
|
||||
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
|
||||
manifestDigest, err := manifest.Digest([]byte(m))
|
||||
c.Assert(err, check.IsNil)
|
||||
digest := manifestDigest.String()
|
||||
@@ -362,8 +379,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUse
|
||||
c.Assert(err, check.IsNil)
|
||||
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", "docker://estesp/busybox@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
var image1 imgspecv1.Image
|
||||
err = json.Unmarshal([]byte(i1), &image1)
|
||||
@@ -388,7 +405,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUses
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(storage)
|
||||
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
|
||||
manifestDigest, err := manifest.Digest([]byte(m))
|
||||
c.Assert(err, check.IsNil)
|
||||
digest := manifestDigest.String()
|
||||
@@ -398,9 +415,9 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUses
|
||||
c.Assert(err, check.IsNil)
|
||||
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", "docker://estesp/busybox@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String())
|
||||
var image1 imgspecv1.Image
|
||||
@@ -424,7 +441,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDig
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(storage)
|
||||
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
|
||||
manifestDigest, err := manifest.Digest([]byte(m))
|
||||
c.Assert(err, check.IsNil)
|
||||
digest := manifestDigest.String()
|
||||
@@ -434,8 +451,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDig
|
||||
c.Assert(err, check.IsNil)
|
||||
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", "docker://estesp/busybox:latest", "containers-storage:"+storage+"test:latest")
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage, "containers-storage:"+storage+"test:latest")
|
||||
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
|
||||
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
|
||||
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test:latest")
|
||||
var image1 imgspecv1.Image
|
||||
@@ -464,16 +481,16 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDig
|
||||
c.Assert(image5.Architecture, check.Equals, "arm64")
|
||||
}
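
The multi-arch storage tests above resolve per-architecture instance digests by parsing the manifest list and choosing an instance for a given SystemContext. A sketch of that resolution step using the same ListFromBlob, GuessMIMEType and ChooseInstance calls the tests make; the image reference is only an example:

```go
package main

import (
	"fmt"
	"os/exec"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
)

func main() {
	raw, err := exec.Command("skopeo", "inspect", "--raw", "docker://registry.k8s.io/pause:latest").Output()
	if err != nil {
		panic(err)
	}
	list, err := manifest.ListFromBlob(raw, manifest.GuessMIMEType(raw))
	if err != nil {
		panic(err)
	}
	// ChooseInstance picks the per-architecture manifest digest, which can then be
	// copied as "<repo>@<instance digest>" just like the tests do.
	arm64, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
	if err != nil {
		panic(err)
	}
	fmt.Println("arm64 instance:", arm64)
}
```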
|
||||
|
||||
func (s *CopySuite) TestCopyFailsWhenImageOSDoesntMatchRuntimeOS(c *check.C) {
|
||||
storage, err := ioutil.TempDir("", "copy-fails-image-doesnt-match-runtime")
|
||||
func (s *CopySuite) TestCopyFailsWhenImageOSDoesNotMatchRuntimeOS(c *check.C) {
|
||||
storage, err := ioutil.TempDir("", "copy-fails-image-does-not-match-runtime")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(storage)
|
||||
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
|
||||
assertSkopeoFails(c, `.*no image found in manifest list for architecture .*, variant .*, OS .*`, "copy", knownWindowsOnlyImage, "containers-storage:"+storage+"test")
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopySucceedsWhenImageDoesntMatchRuntimeButWeOverride(c *check.C) {
|
||||
storage, err := ioutil.TempDir("", "copy-succeeds-image-doesnt-match-runtime-but-override")
|
||||
func (s *CopySuite) TestCopySucceedsWhenImageDoesNotMatchRuntimeButWeOverride(c *check.C) {
|
||||
storage, err := ioutil.TempDir("", "copy-succeeds-image-does-not-match-runtime-but-override")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(storage)
|
||||
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
|
||||
@@ -490,12 +507,12 @@ func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
// "pull": docker: → dir:
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN64, "dir:"+dir1)
|
||||
// "push": dir: → atomic:
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, "atomic:localhost:5000/myns/unsigned:unsigned")
|
||||
// The result of pushing and pulling is an equivalent image, except for schema1 embedded names.
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:unsigned", "dir:"+dir2)
|
||||
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "estesp/busybox:amd64", dir2, "myns/unsigned:unsigned")
|
||||
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "libpod/busybox:amd64", dir2, "myns/unsigned:unsigned")
|
||||
}
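
assertSchema1DirImagesAreEqualExceptNames is needed because Docker schema1 manifests embed the repository name and tag, so pushing the same image under a different name changes the manifest. A sketch that reads just those two fields from a dir: image; the struct is a hypothetical minimal projection, not a library type, and "dir1" is an example path:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"path/filepath"
)

// schema1Names is a hypothetical minimal projection of a Docker schema1 manifest,
// covering only the fields that differ after pushing under a new name.
type schema1Names struct {
	Name string `json:"name"`
	Tag  string `json:"tag"`
}

func embeddedNames(dir string) (schema1Names, error) {
	var n schema1Names
	blob, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
	if err != nil {
		return n, err
	}
	err = json.Unmarshal(blob, &n)
	return n, err
}

func main() {
	n, err := embeddedNames("dir1")
	if err != nil {
		panic(err)
	}
	fmt.Printf("schema1 manifest is for %s:%s\n", n.Name, n.Tag)
}
```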
|
||||
|
||||
// The most basic (skopeo copy) use:
|
||||
@@ -511,28 +528,28 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
// "pull": docker: → dir:
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://registry.k8s.io/pause", "dir:"+dir1)
|
||||
// "push": dir: → docker(v2s2):
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, ourRegistry+"busybox:unsigned")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, ourRegistry+"pause:unsigned")
|
||||
// The result of pushing and pulling is an unmodified image.
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", ourRegistry+"busybox:unsigned", "dir:"+dir2)
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", ourRegistry+"pause:unsigned", "dir:"+dir2)
|
||||
out := combinedOutputOfCommand(c, "diff", "-urN", dir1, dir2)
|
||||
c.Assert(out, check.Equals, "")
|
||||
|
||||
// docker v2s2 -> OCI image layout with image name
|
||||
// ociDest will be created by oci: if it doesn't exist
|
||||
// so don't create it here to exercise auto-creation
|
||||
ociDest := "busybox-latest-image"
|
||||
ociImgName := "busybox"
|
||||
ociDest := "pause-latest-image"
|
||||
ociImgName := "pause"
|
||||
defer os.RemoveAll(ociDest)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://busybox:latest", "oci:"+ociDest+":"+ociImgName)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://registry.k8s.io/pause:latest", "oci:"+ociDest+":"+ociImgName)
|
||||
_, err = os.Stat(ociDest)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// docker v2s2 -> OCI image layout without image name
|
||||
ociDest = "busybox-latest-noimage"
|
||||
ociDest = "pause-latest-noimage"
|
||||
defer os.RemoveAll(ociDest)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://busybox:latest", "oci:"+ociDest)
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://registry.k8s.io/pause:latest", "oci:"+ociDest)
|
||||
_, err = os.Stat(ociDest)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
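
The oci: destination auto-creates the layout and records the optional image name as an annotation in index.json. A sketch that lists those recorded names, assuming the standard org.opencontainers.image.ref.name annotation; the layout path matches the example above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"path/filepath"

	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// listOCINames lists the image names recorded in an OCI layout's index.json,
// e.g. the "pause" entry written by oci:pause-latest-image:pause above.
func listOCINames(layoutDir string) ([]string, error) {
	blob, err := ioutil.ReadFile(filepath.Join(layoutDir, "index.json"))
	if err != nil {
		return nil, err
	}
	var index imgspecv1.Index
	if err := json.Unmarshal(blob, &index); err != nil {
		return nil, err
	}
	var names []string
	for _, desc := range index.Manifests {
		// The oci: transport records the optional image name in this standard annotation.
		if name, ok := desc.Annotations["org.opencontainers.image.ref.name"]; ok {
			names = append(names, name)
		}
	}
	return names, nil
}

func main() {
	names, err := listOCINames("pause-latest-image")
	if err != nil {
		panic(err)
	}
	fmt.Println(names)
}
```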
|
||||
@@ -586,7 +603,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
|
||||
"oci:"+encryptedImgDir+":encrypted", "oci:"+decryptedImgDir+":decrypted")
|
||||
|
||||
// Copy a standard busybox image locally
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://busybox:1.31.1", "oci:"+originalImageDir+":latest")
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN+":1.30.1", "oci:"+originalImageDir+":latest")
|
||||
|
||||
// Encrypt the image
|
||||
assertSkopeoSucceeds(c, "", "copy", "--encryption-key",
|
||||
@@ -617,7 +634,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
|
||||
matchLayerBlobBinaryType(c, decryptedImgDir+"/blobs/sha256", "application/x-gzip", 1)
|
||||
|
||||
// Copy a standard multi layer nginx image locally
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://nginx:1.17.8", "oci:"+multiLayerImageDir+":latest")
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQINMultiLayer, "oci:"+multiLayerImageDir+":latest")
|
||||
|
||||
// Partially encrypt the image
|
||||
assertSkopeoSucceeds(c, "", "copy", "--encryption-key", "jwe:"+keysDir+"/public.key",
|
||||
@@ -722,11 +739,11 @@ func (s *CopySuite) TestCopyStreaming(c *check.C) {
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
// streaming: docker: → atomic:
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://estesp/busybox:amd64", "atomic:localhost:5000/myns/unsigned:streaming")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", testFQIN64, "atomic:localhost:5000/myns/unsigned:streaming")
|
||||
// Compare (copies of) the original and the copy:
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN64, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:streaming", "dir:"+dir2)
|
||||
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "estesp/busybox:amd64", dir2, "myns/unsigned:streaming")
|
||||
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "libpod/busybox:amd64", dir2, "myns/unsigned:streaming")
|
||||
// FIXME: Also check pushing to docker://
|
||||
}
|
||||
|
||||
@@ -746,7 +763,7 @@ func (s *CopySuite) TestCopyOCIRoundTrip(c *check.C) {
|
||||
defer os.RemoveAll(oci2)
|
||||
|
||||
// Docker -> OCI
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://busybox", "oci:"+oci1+":latest")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", testFQIN, "oci:"+oci1+":latest")
|
||||
// OCI -> Docker
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "oci:"+oci1+":latest", ourRegistry+"original/busybox:oci_copy")
|
||||
// Docker -> OCI
|
||||
@@ -797,16 +814,16 @@ func (s *CopySuite) TestCopySignatures(c *check.C) {
|
||||
defer os.Remove(policy)
|
||||
|
||||
// type: reject
|
||||
assertSkopeoFails(c, ".*Source image rejected: Running image docker://busybox:latest is rejected by policy.*",
|
||||
"--policy", policy, "copy", "docker://busybox:latest", dirDest)
|
||||
assertSkopeoFails(c, fmt.Sprintf(".*Source image rejected: Running image %s:latest is rejected by policy.*", testFQIN),
|
||||
"--policy", policy, "copy", testFQIN+":latest", dirDest)
|
||||
|
||||
// type: insecureAcceptAnything
|
||||
assertSkopeoSucceeds(c, "", "--policy", policy, "copy", "docker://openshift/hello-openshift", dirDest)
|
||||
assertSkopeoSucceeds(c, "", "--policy", policy, "copy", "docker://quay.io/openshift/origin-hello-openshift", dirDest)
|
||||
|
||||
// type: signedBy
|
||||
// Sign the images
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", "docker://busybox:1.26", "atomic:localhost:5006/myns/personal:personal")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "official@example.com", "docker://busybox:1.26.1", "atomic:localhost:5006/myns/official:official")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", testFQIN+":1.26", "atomic:localhost:5006/myns/personal:personal")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "official@example.com", testFQIN+":1.26.1", "atomic:localhost:5006/myns/official:official")
|
||||
// Verify that we can pull them
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/personal:personal", dirDest)
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/official:official", dirDest)
|
||||
@@ -860,10 +877,10 @@ func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
|
||||
defer os.Remove(policy)
|
||||
|
||||
// Get some images.
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:armfh", topDirDest+"/dir1")
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:s390x", topDirDest+"/dir2")
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN+":armfh", topDirDest+"/dir1")
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN+":s390x", topDirDest+"/dir2")
|
||||
|
||||
// Sign the images. By coping fom a topDirDest/dirN, also test that non-/restricted paths
|
||||
// Sign the images. By copying from a topDirDest/dirN, also test that non-/restricted paths
|
||||
// use the dir:"" default of insecureAcceptAnything.
|
||||
// (For signing, we must push to atomic: to get a Docker identity to use in the signature.)
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "personal@example.com", topDirDest+"/dir1", "atomic:localhost:5000/myns/personal:dirstaging")
|
||||
@@ -977,7 +994,7 @@ func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Get an image to work with. Also verifies that we can use Docker repositories with no sigstore configured.
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "docker://busybox", ourRegistry+"original/busybox")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", testFQIN, ourRegistry+"original/busybox")
|
||||
// Pulling an unsigned image fails.
|
||||
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
|
||||
"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"original/busybox", dirDest)
|
||||
@@ -1031,7 +1048,7 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
|
||||
defer os.Remove(policy)
|
||||
|
||||
// Get an image to work with to an atomic: destination. Also verifies that we can use Docker repositories without X-Registry-Supports-Signatures
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "docker://busybox", "atomic:localhost:5000/myns/extension:unsigned")
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", testFQIN, "atomic:localhost:5000/myns/extension:unsigned")
|
||||
// Pulling an unsigned image using atomic: fails.
|
||||
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
|
||||
"--tls-verify=false", "--policy", policy,
|
||||
@@ -1055,7 +1072,7 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
|
||||
|
||||
// Get another image (different so that they don't share signatures, and sign it using docker://)
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir,
|
||||
"copy", "--sign-by", "personal@example.com", "docker://estesp/busybox:ppc64le", "docker://localhost:5000/myns/extension:extension")
|
||||
"copy", "--sign-by", "personal@example.com", testFQIN+":ppc64le", "docker://localhost:5000/myns/extension:extension")
|
||||
c.Logf("%s", combinedOutputOfCommand(c, "oc", "get", "istag", "extension:extension", "-o", "json"))
|
||||
// Pulling the image using atomic: succeeds.
|
||||
assertSkopeoSucceeds(c, "", "--debug", "--tls-verify=false", "--policy", policy,
|
||||
@@ -1067,6 +1084,25 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
|
||||
assertDirImagesAreEqual(c, filepath.Join(topDir, "dirDA"), filepath.Join(topDir, "dirDD"))
|
||||
}
|
||||
|
||||
// copyWithSignedIdentity creates a copy of an unsigned image, adding a signature for an unrelated identity
|
||||
// This should be easier than using standalone-sign.
|
||||
func copyWithSignedIdentity(c *check.C, src, dest, signedIdentity, signBy, registriesDir string) {
|
||||
topDir, err := ioutil.TempDir("", "copyWithSignedIdentity")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(topDir)
|
||||
|
||||
signingDir := filepath.Join(topDir, "signing-temp")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", src, "dir:"+signingDir)
|
||||
// Unknown error in Travis: https://github.com/containers/skopeo/issues/1093
|
||||
// c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
|
||||
assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", filepath.Join(signingDir, "signature-1"),
|
||||
filepath.Join(signingDir, "manifest.json"), signedIdentity, signBy)
|
||||
// Unknown error in Travis: https://github.com/containers/skopeo/issues/1093
|
||||
// c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
|
||||
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--dest-tls-verify=false", "dir:"+signingDir, dest)
|
||||
}
|
||||
|
||||
// Both mirroring support in registries.conf, and mirrored remapIdentity support in policy.json
|
||||
func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
|
||||
const regPrefix = "docker://localhost:5006/myns/mirroring-"
|
||||
|
||||
@@ -1077,7 +1113,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
|
||||
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
|
||||
}
|
||||
|
||||
topDir, err := ioutil.TempDir("", "mirrored-signatures") // FIXME: Will this be used?
|
||||
topDir, err := ioutil.TempDir("", "mirrored-signatures")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(topDir)
|
||||
registriesDir := filepath.Join(topDir, "registries.d") // An empty directory to disable sigstore use
|
||||
@@ -1092,7 +1128,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
|
||||
// So, make sure to never create a signature that could be considered valid in a different part of the test (i.e. don't reuse tags).
|
||||
|
||||
// Get an image to work with.
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://busybox", regPrefix+"primary:unsigned")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", testFQIN, regPrefix+"primary:unsigned")
|
||||
// Verify that unsigned images are rejected
|
||||
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
|
||||
"--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:unsigned", dirDest)
|
||||
@@ -1111,14 +1147,10 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
|
||||
assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted.*",
|
||||
"--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:mirror-signed", dirDest)
|
||||
|
||||
// Create a signature for mirroring-primary:primary-signed without pushing there. This should be easier than using standalone-sign.
|
||||
signingDir := filepath.Join(topDir, "signing-temp")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", regPrefix+"primary:unsigned", "dir:"+signingDir)
|
||||
c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
|
||||
assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", filepath.Join(signingDir, "signature-1"),
|
||||
filepath.Join(signingDir, "manifest.json"), "localhost:5006/myns/mirroring-primary:primary-signed", "personal@example.com")
|
||||
c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
|
||||
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--dest-tls-verify=false", "dir:"+signingDir, regPrefix+"mirror:primary-signed")
|
||||
// Create a signature for mirroring-primary:primary-signed without pushing there.
|
||||
copyWithSignedIdentity(c, regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed",
|
||||
"localhost:5006/myns/mirroring-primary:primary-signed", "personal@example.com",
|
||||
registriesDir)
|
||||
// Verify that a correctly signed image for the primary is accessible using the primary's reference
|
||||
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:primary-signed", dirDest)
|
||||
// … but verify that while it is accessible using the mirror location
|
||||
@@ -1126,10 +1158,24 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
|
||||
// … verify it is NOT accessible when requiring a signature.
|
||||
assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted; Signature for identity localhost:5006/myns/mirroring-primary:primary-signed is not accepted.*",
|
||||
"--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"mirror:primary-signed", dirDest)
|
||||
|
||||
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", "--dest-tls-verify=false", regPrefix+"primary:unsigned", regPrefix+"remap:remapped")
|
||||
// Verify that while a remapIdentity image is accessible using the remapped (mirror) location
|
||||
assertSkopeoSucceeds(c, "" /* no --policy */, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
|
||||
// … it is NOT accessible when requiring a signature …
|
||||
assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted; Signature for identity localhost:5006/myns/mirroring-primary:primary-signed is not accepted.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
|
||||
// … until signed.
|
||||
copyWithSignedIdentity(c, regPrefix+"remap:remapped", regPrefix+"remap:remapped",
|
||||
"localhost:5006/myns/mirroring-primary:remapped", "personal@example.com",
|
||||
registriesDir)
|
||||
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
|
||||
// To be extra clear about the semantics, verify that the signedPrefix (primary) location never exists
|
||||
// and only the remapped prefix (mirror) is accessed.
|
||||
assertSkopeoFails(c, ".*Error initializing source docker://localhost:5006/myns/mirroring-primary:remapped:.*manifest unknown: manifest unknown.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:remapped", dirDest)
|
||||
}
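
The remapIdentity policy entry exercised here (see the fixtures/policy.json hunk below) accepts, for an image pulled under prefix, a signature whose identity uses signedPrefix instead. A toy illustration of that prefix substitution, not skopeo's actual policy engine:

```go
package main

import (
	"fmt"
	"strings"
)

// remapIdentity is a toy illustration of the remapIdentity signedIdentity type:
// an image pulled as prefix/<rest> is checked against signatures made for
// signedPrefix/<rest>. This is only the idea, not the real policy evaluation.
func remapIdentity(pulledRef, prefix, signedPrefix string) (string, bool) {
	if !strings.HasPrefix(pulledRef, prefix) {
		return "", false
	}
	return signedPrefix + strings.TrimPrefix(pulledRef, prefix), true
}

func main() {
	expected, ok := remapIdentity(
		"localhost:5006/myns/mirroring-remap:remapped",
		"localhost:5006/myns/mirroring-remap",
		"localhost:5006/myns/mirroring-primary",
	)
	fmt.Println(ok, expected) // true localhost:5006/myns/mirroring-primary:remapped
}
```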
|
||||
|
||||
func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
dir1, err := ioutil.TempDir("", "copy-1")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir1)
|
||||
@@ -1137,15 +1183,15 @@ func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCopyDestWithAuth(c *check.C) {
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
}
|
||||
|
||||
func (s *SkopeoSuite) TestCopySrcAndDestWithAuth(c *check.C) {
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
|
||||
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", "--dest-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), fmt.Sprintf("docker://%s/test:auth", s.regV2WithAuth.url))
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyNoPanicOnHTTPResponseWOTLSVerifyFalse(c *check.C) {
|
||||
func (s *CopySuite) TestCopyNoPanicOnHTTPResponseWithoutTLSVerifyFalse(c *check.C) {
|
||||
const ourRegistry = "docker://" + v2DockerRegistryURL + "/"
|
||||
|
||||
// dir:test isn't created beforehand just because we already know this could
|
||||
@@ -1171,7 +1217,7 @@ func (s *CopySuite) TestCopyManifestConversion(c *check.C) {
|
||||
|
||||
// oci to v2s1 and vice-versa not supported yet
|
||||
// get v2s2 manifest type
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+srcDir)
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN, "dir:"+srcDir)
|
||||
verifyManifestMIMEType(c, srcDir, manifest.DockerV2Schema2MediaType)
|
||||
// convert from v2s2 to oci
|
||||
assertSkopeoSucceeds(c, "", "copy", "--format=oci", "dir:"+srcDir, "dir:"+destDir1)
|
||||
@@ -1201,7 +1247,7 @@ func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Regist
|
||||
|
||||
// Ensure we are working with a schema2 image.
|
||||
// dir: accepts any manifest format, i.e. this makes …/input2 a schema2 source which cannot be asked to produce schema1 like ordinary docker: registries can.
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+input2Dir)
|
||||
assertSkopeoSucceeds(c, "", "copy", testFQIN, "dir:"+input2Dir)
|
||||
verifyManifestMIMEType(c, input2Dir, manifest.DockerV2Schema2MediaType)
|
||||
// 2→2 (the "f2t2" in tag means "from 2 to 2")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input2Dir, schema2Registry+":f2t2")
|
||||
@@ -1221,14 +1267,6 @@ func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Regist
|
||||
verifyManifestMIMEType(c, destDir, manifest.DockerV2Schema1SignedMediaType)
|
||||
}
|
||||
|
||||
// Verify manifest in a dir: image at dir is expectedMIMEType.
|
||||
func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
|
||||
manifestBlob, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
mimeType := manifest.GuessMIMEType(manifestBlob)
|
||||
c.Assert(mimeType, check.Equals, expectedMIMEType)
|
||||
}
|
||||
|
||||
const regConfFixture = "./fixtures/registries.conf"
|
||||
|
||||
func (s *SkopeoSuite) TestSuccessCopySrcWithMirror(c *check.C) {
|
||||
|
||||
@@ -34,10 +34,27 @@
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg"
|
||||
}
|
||||
],
|
||||
"localhost:5006/myns/mirroring-remap": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "@keydir@/personal-pubkey.gpg",
|
||||
"signedIdentity": {
|
||||
"type": "remapIdentity",
|
||||
"prefix": "localhost:5006/myns/mirroring-remap",
|
||||
"signedPrefix": "localhost:5006/myns/mirroring-primary"
|
||||
}
|
||||
}
|
||||
],
|
||||
"docker.io/openshift": [
|
||||
{
|
||||
"type": "insecureAcceptAnything"
|
||||
}
|
||||
],
|
||||
"quay.io/openshift": [
|
||||
{
|
||||
"type": "insecureAcceptAnything"
|
||||
}
|
||||
]
|
||||
},
|
||||
"dir": {
|
||||
|
||||
@@ -19,8 +19,8 @@ to start a container, then within the container:
|
||||
SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'
|
||||
|
||||
An example of what can be done within the container:
|
||||
cd ..; make bin/skopeo install
|
||||
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://busybox:latest atomic:localhost:5000/myns/personal:personal
|
||||
cd ..; make bin/skopeo PREFIX=/usr install
|
||||
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://quay.io/libpod/busybox:latest atomic:localhost:5000/myns/personal:personal
|
||||
oc get istag personal:personal -o json
|
||||
curl -L -v 'http://localhost:5000/v2/'
|
||||
cat ~/.docker/config.json
|
||||
|
||||
@@ -11,8 +11,26 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/docker"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/go-check/check"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
// A repository with a path with multiple components in it which
|
||||
// contains multiple tags, preferably with some tags pointing to
|
||||
// manifest lists, and with some tags that don't.
|
||||
pullableRepo = "quay.io/libpod/testimage"
|
||||
// A tagged image in the repository that we can inspect and copy.
|
||||
pullableTaggedImage = "registry.k8s.io/coredns/coredns:v1.6.6"
|
||||
// A tagged manifest list in the repository that we can inspect and copy.
|
||||
pullableTaggedManifestList = "registry.k8s.io/coredns/coredns:v1.8.0"
|
||||
// A repository containing multiple tags, some of which are for
|
||||
// manifest lists, and which includes a "latest" tag. We specify the
|
||||
// name here without a tag.
|
||||
pullableRepoWithLatestTag = "registry.k8s.io/pause"
|
||||
)
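
Several tests below enumerate these repositories with docker.GetRepositoryTags. A minimal sketch of that lookup for pullableRepo, using only calls that already appear in this file:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func main() {
	// Same pattern as TestYamlUntagged: parse a //repo reference, then list its tags.
	ref, err := docker.ParseReference("//quay.io/libpod/testimage")
	if err != nil {
		panic(err)
	}
	tags, err := docker.GetRepositoryTags(context.Background(), &types.SystemContext{}, ref)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d tags: %v\n", len(tags), tags)
}
```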
|
||||
|
||||
func init() {
|
||||
@@ -95,7 +113,7 @@ func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := "busybox:latest"
|
||||
image := pullableTaggedImage
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().String()
|
||||
@@ -117,9 +135,37 @@ func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
|
||||
c.Assert(out, check.Equals, "")
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestDocker2DirTaggedAll(c *check.C) {
|
||||
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := pullableTaggedManifestList
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().String()
|
||||
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
dir2 := path.Join(tmpDir, "dir2")
|
||||
|
||||
// sync docker => dir
|
||||
assertSkopeoSucceeds(c, "", "sync", "--all", "--scoped", "--src", "docker", "--dest", "dir", image, dir1)
|
||||
_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// copy docker => dir
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", "docker://"+image, "dir:"+dir2)
|
||||
_, err = os.Stat(path.Join(dir2, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
out := combinedOutputOfCommand(c, "diff", "-urN", path.Join(dir1, imagePath), dir2)
|
||||
c.Assert(out, check.Equals, "")
|
||||
}
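
Because --all copies the whole manifest list, the manifest.json written into dir2 above should itself be a list. A hedged check of that property; manifest.MIMETypeIsMultiImage is assumed to be the containers/image helper for distinguishing lists from single-image manifests, and the path is an example:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"

	"github.com/containers/image/v5/manifest"
)

func main() {
	// dir2 is the destination of the "copy --all" above; the path is an example.
	blob, err := ioutil.ReadFile(filepath.Join("dir2", "manifest.json"))
	if err != nil {
		panic(err)
	}
	mimeType := manifest.GuessMIMEType(blob)
	// MIMETypeIsMultiImage (assumed helper) reports whether this MIME type is a
	// manifest list / OCI index rather than a single-image manifest.
	fmt.Println(mimeType, manifest.MIMETypeIsMultiImage(mimeType))
}
```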
|
||||
|
||||
func (s *SyncSuite) TestScoped(c *check.C) {
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := "busybox:latest"
|
||||
image := pullableTaggedImage
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().String()
|
||||
@@ -127,7 +173,7 @@ func (s *SyncSuite) TestScoped(c *check.C) {
|
||||
dir1, err := ioutil.TempDir("", "skopeo-sync-test")
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1)
|
||||
_, err = os.Stat(path.Join(dir1, image, "manifest.json"))
|
||||
_, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1)
|
||||
@@ -139,29 +185,29 @@ func (s *SyncSuite) TestScoped(c *check.C) {
|
||||
|
||||
func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := "busybox:latest"
|
||||
image := pullableRepoWithLatestTag
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().String()
|
||||
|
||||
// make a copy of the image in the local registry
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image, "docker://"+path.Join(v2DockerRegistryURL, image))
|
||||
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image, "docker://"+path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())))
|
||||
|
||||
//sync upstream image to dir, not scoped
|
||||
dir1, err := ioutil.TempDir("", "skopeo-sync-test")
|
||||
c.Assert(err, check.IsNil)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1)
|
||||
_, err = os.Stat(path.Join(dir1, image, "manifest.json"))
|
||||
_, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
//sync local registry image to dir, not scoped
|
||||
assertSkopeoFails(c, ".*Refusing to overwrite destination directory.*", "sync", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, image), dir1)
|
||||
assertSkopeoFails(c, ".*Refusing to overwrite destination directory.*", "sync", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())), dir1)
|
||||
|
||||
//sync local registry image to dir, scoped
|
||||
imageRef, err = docker.ParseReference(fmt.Sprintf("//%s", path.Join(v2DockerRegistryURL, image)))
|
||||
imageRef, err = docker.ParseReference(fmt.Sprintf("//%s", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference()))))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath = imageRef.DockerReference().String()
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, image), dir1)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())), dir1)
|
||||
_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
os.RemoveAll(dir1)
|
||||
@@ -174,7 +220,7 @@ func (s *SyncSuite) TestDocker2DirUntagged(c *check.C) {
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := "alpine"
|
||||
image := pullableRepo
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().String()
|
||||
@@ -198,7 +244,7 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {
|
||||
defer os.RemoveAll(tmpDir)
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
|
||||
image := "alpine"
|
||||
image := pullableRepo
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().Name()
|
||||
@@ -209,23 +255,22 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {
|
||||
c.Check(len(tags), check.Not(check.Equals), 0)
|
||||
|
||||
yamlConfig := fmt.Sprintf(`
|
||||
docker.io:
|
||||
%s:
|
||||
images:
|
||||
%s:
|
||||
`, image)
|
||||
%s: []
|
||||
`, reference.Domain(imageRef.DockerReference()), reference.Path(imageRef.DockerReference()))
|
||||
|
||||
//sync to the local reg
|
||||
// sync to the local registry
|
||||
yamlFile := path.Join(tmpDir, "registries.yaml")
|
||||
ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "docker", "--dest-tls-verify=false", yamlFile, v2DockerRegistryURL)
|
||||
// sync back from local reg to a folder
|
||||
// sync back from local registry to a folder
|
||||
os.Remove(yamlFile)
|
||||
yamlConfig = fmt.Sprintf(`
|
||||
%s:
|
||||
tls-verify: false
|
||||
images:
|
||||
%s:
|
||||
|
||||
%s: []
|
||||
`, v2DockerRegistryURL, imagePath)
|
||||
|
||||
ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
|
||||
@@ -234,7 +279,9 @@ docker.io:
|
||||
sysCtx = types.SystemContext{
|
||||
DockerInsecureSkipTLSVerify: types.NewOptionalBool(true),
|
||||
}
|
||||
localTags, err := docker.GetRepositoryTags(context.Background(), &sysCtx, imageRef)
|
||||
localImageRef, err := docker.ParseReference(fmt.Sprintf("//%s/%s", v2DockerRegistryURL, imagePath))
|
||||
c.Assert(err, check.IsNil)
|
||||
localTags, err := docker.GetRepositoryTags(context.Background(), &sysCtx, localImageRef)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Check(len(localTags), check.Not(check.Equals), 0)
|
||||
c.Assert(len(localTags), check.Equals, len(tags))
|
||||
@@ -262,9 +309,9 @@ func (s *SyncSuite) TestYamlRegex2Dir(c *check.C) {
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
|
||||
yamlConfig := `
|
||||
docker.io:
|
||||
registry.k8s.io:
|
||||
images-by-tag-regex:
|
||||
nginx: ^1\.13\.[12]-alpine-perl$ # regex string test
|
||||
pause: ^[12]\.0$ # regex string test
|
||||
`
|
||||
// the ↑ regex string always matches only 2 images
|
||||
var nTags = 2
|
||||
@@ -289,6 +336,37 @@ docker.io:
|
||||
c.Assert(nManifests, check.Equals, nTags)
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestYamlDigest2Dir(c *check.C) {
|
||||
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
|
||||
yamlConfig := `
|
||||
registry.k8s.io:
|
||||
images:
|
||||
pause:
|
||||
- sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
|
||||
`
|
||||
yamlFile := path.Join(tmpDir, "registries.yaml")
|
||||
ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
|
||||
|
||||
nManifests := 0
|
||||
err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !info.IsDir() && info.Name() == "manifest.json" {
|
||||
nManifests++
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
})
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(nManifests, check.Equals, 1)
|
||||
}
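
The registries.yaml sources used by these sync tests map a registry to images (tag or digest lists) and images-by-tag-regex entries. A hypothetical minimal model of that shape, unmarshalled with gopkg.in/yaml.v2 purely for illustration; skopeo's real configuration type may differ:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// registrySource is a hypothetical minimal model of one registry entry in the
// sync YAML used above; it is not skopeo's internal type.
type registrySource struct {
	TLSVerify        bool                `yaml:"tls-verify"`
	Images           map[string][]string `yaml:"images"`
	ImagesByTagRegex map[string]string   `yaml:"images-by-tag-regex"`
}

func main() {
	config := []byte(`
registry.k8s.io:
  images:
    pause:
      - sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
`)
	var sources map[string]registrySource
	if err := yaml.Unmarshal(config, &sources); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", sources["registry.k8s.io"].Images["pause"])
}
```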
|
||||
|
||||
func (s *SyncSuite) TestYaml2Dir(c *check.C) {
|
||||
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
|
||||
c.Assert(err, check.IsNil)
|
||||
@@ -296,21 +374,21 @@ func (s *SyncSuite) TestYaml2Dir(c *check.C) {
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
|
||||
yamlConfig := `
|
||||
docker.io:
|
||||
registry.k8s.io:
|
||||
images:
|
||||
busybox:
|
||||
- latest
|
||||
- musl
|
||||
alpine:
|
||||
- edge
|
||||
- 3.8
|
||||
opensuse/leap:
|
||||
coredns/coredns:
|
||||
- v1.8.0
|
||||
- v1.7.1
|
||||
k8s-dns-kube-dns:
|
||||
- 1.14.12
|
||||
- 1.14.13
|
||||
pause:
|
||||
- latest
|
||||
|
||||
quay.io:
|
||||
images:
|
||||
quay/busybox:
|
||||
- latest`
|
||||
quay/busybox:
|
||||
- latest`
|
||||
|
||||
// get the number of tags
|
||||
re := regexp.MustCompile(`^ +- +[^:/ ]+`)
|
||||
@@ -347,7 +425,7 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
image := "busybox"
|
||||
image := pullableRepoWithLatestTag
|
||||
tag := "latest"
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
@@ -396,6 +474,26 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
|
||||
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestSyncManifestOutput(c *check.C) {
|
||||
tmpDir, err := ioutil.TempDir("", "sync-manifest-output")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
destDir1 := filepath.Join(tmpDir, "dest1")
|
||||
destDir2 := filepath.Join(tmpDir, "dest2")
|
||||
destDir3 := filepath.Join(tmpDir, "dest3")
|
||||
|
||||
// Split the image:tag path from the image URI for manifest comparison
|
||||
imageDir := pullableTaggedImage[strings.LastIndex(pullableTaggedImage, "/")+1:]
|
||||
|
||||
assertSkopeoSucceeds(c, "", "sync", "--format=oci", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir1)
|
||||
verifyManifestMIMEType(c, filepath.Join(destDir1, imageDir), imgspecv1.MediaTypeImageManifest)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--format=v2s2", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir2)
|
||||
verifyManifestMIMEType(c, filepath.Join(destDir2, imageDir), manifest.DockerV2Schema2MediaType)
|
||||
assertSkopeoSucceeds(c, "", "sync", "--format=v2s1", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir3)
|
||||
verifyManifestMIMEType(c, filepath.Join(destDir3, imageDir), manifest.DockerV2Schema1SignedMediaType)
|
||||
}
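
The --format values exercised above correspond one-to-one to the MIME types that verifyManifestMIMEType checks. A small reference mapping in code form, using the constants these tests already import:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Mapping of skopeo's --format values to the manifest MIME types the sync
	// test above expects to find in the destination directories.
	formats := map[string]string{
		"oci":  imgspecv1.MediaTypeImageManifest,
		"v2s2": manifest.DockerV2Schema2MediaType,
		"v2s1": manifest.DockerV2Schema1SignedMediaType,
	}
	for flag, mime := range formats {
		fmt.Printf("--format=%s -> %s\n", flag, mime)
	}
}
```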
|
||||
|
||||
func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
|
||||
const localRegURL = "docker://" + v2DockerRegistryURL + "/"
|
||||
|
||||
@@ -404,7 +502,7 @@ func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := "busybox:latest"
|
||||
image := pullableTaggedImage
|
||||
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
|
||||
c.Assert(err, check.IsNil)
|
||||
imagePath := imageRef.DockerReference().String()
|
||||
@@ -437,7 +535,7 @@ func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
|
||||
image := "busybox:latest"
|
||||
image := pullableRepoWithLatestTag
|
||||
|
||||
dir1 := path.Join(tmpDir, "dir1")
|
||||
err = os.Mkdir(dir1, 0755)
|
||||
@@ -446,6 +544,10 @@ func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
|
||||
err = os.Mkdir(dir2, 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// create leading dirs
|
||||
err = os.MkdirAll(path.Dir(path.Join(dir1, image)), 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// copy docker => dir
|
||||
assertSkopeoSucceeds(c, "", "copy", "docker://"+image, "dir:"+path.Join(dir1, image))
|
||||
_, err = os.Stat(path.Join(dir1, image, "manifest.json"))
|
||||
@@ -454,9 +556,13 @@ func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
|
||||
// sync dir => docker
|
||||
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", dir1, v2DockerRegistryURL)
|
||||
|
||||
// create leading dirs
|
||||
err = os.MkdirAll(path.Dir(path.Join(dir2, image)), 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// copy docker => dir
|
||||
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", localRegURL+image, "dir:"+path.Join(dir2, image))
|
||||
_, err = os.Stat(path.Join(path.Join(dir2, image), "manifest.json"))
|
||||
_, err = os.Stat(path.Join(dir2, image, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
out := combinedOutputOfCommand(c, "diff", "-urN", dir1, dir2)
|
||||
@@ -519,7 +625,7 @@ func (s *SyncSuite) TestFailsWithDockerSourceUnauthorized(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) {
|
||||
repo := path.Join(v2DockerRegistryURL, "imagedoesdotexist")
|
||||
repo := path.Join(v2DockerRegistryURL, "imagedoesnotexist")
|
||||
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
@@ -10,12 +10,17 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/go-check/check"
|
||||
)
|
||||
|
||||
const skopeoBinary = "skopeo"
|
||||
const decompressDirsBinary = "./decompress-dirs.sh"
|
||||
|
||||
const testFQIN = "docker://quay.io/libpod/busybox" // tag left off on purpose, some tests need to add a special one
|
||||
const testFQIN64 = "docker://quay.io/libpod/busybox:amd64"
|
||||
const testFQINMultiLayer = "docker://quay.io/libpod/alpine_nginx:master" // multi-layer
|
||||
|
||||
// consumeAndLogOutputStream takes (f, err) from an exec.*Pipe(), and causes all output to it to be logged to c.
|
||||
func consumeAndLogOutputStream(c *check.C, id string, f io.ReadCloser, err error) {
|
||||
c.Assert(err, check.IsNil)
|
||||
@@ -200,3 +205,11 @@ func runDecompressDirs(c *check.C, regexp string, args ...string) {
|
||||
c.Assert(string(out), check.Matches, "(?s)"+regexp) // (?s) : '.' will also match newlines
|
||||
}
|
||||
}
|
||||
|
||||
// Verify manifest in a dir: image at dir is expectedMIMEType.
|
||||
func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
|
||||
manifestBlob, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
|
||||
c.Assert(err, check.IsNil)
|
||||
mimeType := manifest.GuessMIMEType(manifestBlob)
|
||||
c.Assert(mimeType, check.Equals, expectedMIMEType)
|
||||
}
|
||||
|
||||
nix/default-arm64.nix (new file, 67 lines)
@@ -0,0 +1,67 @@
|
||||
let
|
||||
pkgs = (import ./nixpkgs.nix {
|
||||
crossSystem = {
|
||||
config = "aarch64-unknown-linux-gnu";
|
||||
};
|
||||
config = {
|
||||
packageOverrides = pkg: {
|
||||
gpgme = (static pkg.gpgme);
|
||||
libassuan = (static pkg.libassuan);
|
||||
libgpgerror = (static pkg.libgpgerror);
|
||||
libseccomp = (static pkg.libseccomp);
|
||||
glib = (static pkg.glib).overrideAttrs (x: {
|
||||
outputs = [ "bin" "out" "dev" ];
|
||||
mesonFlags = [
|
||||
"-Ddefault_library=static"
|
||||
"-Ddevbindir=${placeholder ''dev''}/bin"
|
||||
"-Dgtk_doc=false"
|
||||
"-Dnls=disabled"
|
||||
];
|
||||
postInstall = ''
|
||||
moveToOutput "share/glib-2.0" "$dev"
|
||||
substituteInPlace "$dev/bin/gdbus-codegen" --replace "$out" "$dev"
|
||||
sed -i "$dev/bin/glib-gettextize" -e "s|^gettext_dir=.*|gettext_dir=$dev/share/glib-2.0/gettext|"
|
||||
sed '1i#line 1 "${x.pname}-${x.version}/include/glib-2.0/gobject/gobjectnotifyqueue.c"' \
|
||||
-i "$dev"/include/glib-2.0/gobject/gobjectnotifyqueue.c
|
||||
'';
|
||||
});
|
||||
};
|
||||
};
|
||||
});
|
||||
|
||||
static = pkg: pkg.overrideAttrs (x: {
|
||||
doCheck = false;
|
||||
configureFlags = (x.configureFlags or [ ]) ++ [
|
||||
"--without-shared"
|
||||
"--disable-shared"
|
||||
];
|
||||
dontDisableStatic = true;
|
||||
enableSharedExecutables = false;
|
||||
enableStatic = true;
|
||||
});
|
||||
|
||||
self = with pkgs; buildGoModule rec {
|
||||
name = "skopeo";
|
||||
src = ./..;
|
||||
vendorSha256 = null;
|
||||
doCheck = false;
|
||||
enableParallelBuilding = true;
|
||||
outputs = [ "out" ];
|
||||
nativeBuildInputs = [ bash gitMinimal go-md2man installShellFiles makeWrapper pkg-config which ];
|
||||
buildInputs = [ glibc glibc.static gpgme libassuan libgpgerror libseccomp ];
|
||||
prePatch = ''
|
||||
export CFLAGS='-static -pthread'
|
||||
export LDFLAGS='-s -w -static-libgcc -static'
|
||||
export EXTRA_LDFLAGS='-s -w -linkmode external -extldflags "-static -lm"'
|
||||
export BUILDTAGS='static netgo osusergo exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
|
||||
'';
|
||||
buildPhase = ''
|
||||
patchShebangs .
|
||||
make bin/skopeo
|
||||
'';
|
||||
installPhase = ''
|
||||
install -Dm755 bin/skopeo $out/bin/skopeo
|
||||
'';
|
||||
};
|
||||
in
|
||||
self
|
||||
@@ -7,7 +7,7 @@ let
|
||||
libassuan = (static pkg.libassuan);
|
||||
libgpgerror = (static pkg.libgpgerror);
|
||||
libseccomp = (static pkg.libseccomp);
|
||||
glib = (static pkg.glib).overrideAttrs(x: {
|
||||
glib = (static pkg.glib).overrideAttrs (x: {
|
||||
outputs = [ "bin" "out" "dev" ];
|
||||
mesonFlags = [
|
||||
"-Ddefault_library=static"
|
||||
@@ -15,14 +15,21 @@ let
|
||||
"-Dgtk_doc=false"
|
||||
"-Dnls=disabled"
|
||||
];
|
||||
postInstall = ''
|
||||
moveToOutput "share/glib-2.0" "$dev"
|
||||
substituteInPlace "$dev/bin/gdbus-codegen" --replace "$out" "$dev"
|
||||
sed -i "$dev/bin/glib-gettextize" -e "s|^gettext_dir=.*|gettext_dir=$dev/share/glib-2.0/gettext|"
|
||||
sed '1i#line 1 "${x.pname}-${x.version}/include/glib-2.0/gobject/gobjectnotifyqueue.c"' \
|
||||
-i "$dev"/include/glib-2.0/gobject/gobjectnotifyqueue.c
|
||||
'';
|
||||
});
|
||||
};
|
||||
};
|
||||
});
|
||||
|
||||
static = pkg: pkg.overrideAttrs(x: {
|
||||
static = pkg: pkg.overrideAttrs (x: {
|
||||
doCheck = false;
|
||||
configureFlags = (x.configureFlags or []) ++ [
|
||||
configureFlags = (x.configureFlags or [ ]) ++ [
|
||||
"--without-shared"
|
||||
"--disable-shared"
|
||||
];
|
||||
@@ -38,13 +45,13 @@ let
|
||||
doCheck = false;
|
||||
enableParallelBuilding = true;
|
||||
outputs = [ "out" ];
|
||||
nativeBuildInputs = [ bash git go-md2man installShellFiles makeWrapper pkg-config which ];
|
||||
nativeBuildInputs = [ bash gitMinimal go-md2man installShellFiles makeWrapper pkg-config which ];
|
||||
buildInputs = [ glibc glibc.static gpgme libassuan libgpgerror libseccomp ];
|
||||
prePatch = ''
|
||||
export CFLAGS='-static'
|
||||
export CFLAGS='-static -pthread'
|
||||
export LDFLAGS='-s -w -static-libgcc -static'
|
||||
export EXTRA_LDFLAGS='-s -w -linkmode external -extldflags "-static -lm"'
|
||||
export BUILDTAGS='static netgo exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
|
||||
export BUILDTAGS='static netgo osusergo exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
|
||||
'';
|
||||
buildPhase = ''
|
||||
patchShebangs .
|
||||
@@ -54,4 +61,5 @@ let
|
||||
install -Dm755 bin/skopeo $out/bin/skopeo
|
||||
'';
|
||||
};
|
||||
in self
|
||||
in
|
||||
self
|
||||
|
||||
@@ -1,7 +1,10 @@
{
  "url": "https://github.com/nixos/nixpkgs",
  "rev": "d5a689edda8219a1e20fd3871174b994cf0a94a3",
  "date": "2020-09-13T01:58:20+02:00",
  "sha256": "0m6nmi1fx0glfbg52kqdjgidxylk4p5xnx9v35wlsfi1j2xhkia4",
  "fetchSubmodules": false
  "rev": "eb7e1ef185f6c990cda5f71fdc4fb02e76ab06d5",
  "date": "2021-05-05T23:16:00+02:00",
  "path": "/nix/store/a98lkhjlsqh32ic2kkrv5kkik6jy25wh-nixpkgs",
  "sha256": "1ibz204c41g7baqga2iaj11yz9l75cfdylkiqjnk5igm81ivivxg",
  "fetchSubmodules": false,
  "deepClone": false,
  "leaveDotGit": false
}
release/Makefile (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
## This Makefile is used to publish skopeo container images with Travis CI ##
|
||||
## Environment variables, used in this Makefile are specified in .travis.yml
|
||||
|
||||
export DOCKER_CLI_EXPERIMENTAL=enabled
|
||||
GOARCH ?= $(shell go env GOARCH)
|
||||
|
||||
# Dereference variable $(1), return value if non-empty, otherwise raise an error.
|
||||
err_if_empty = $(if $(strip $($(1))),$(strip $($(1))),$(error Required $(1) variable is undefined or empty))
|
||||
|
||||
# Requires two arguments: Names of the username and the password env. vars.
|
||||
define quay_login
|
||||
@echo "$(call err_if_empty,$(2))" | \
|
||||
docker login quay.io -u "$(call err_if_empty,$(1))" --password-stdin
|
||||
endef
|
||||
|
||||
# Build container image of skopeo upstream based on host architecture
|
||||
build-image/upstream:
|
||||
docker build -t "${UPSTREAM_IMAGE}-${GOARCH}" contrib/skopeoimage/upstream
|
||||
|
||||
# Build container image of skopeo stable based on host architecture
|
||||
build-image/stable:
|
||||
docker build -t "${STABLE_IMAGE}-${GOARCH}" contrib/skopeoimage/stable
|
||||
|
||||
# Push container image of skopeo upstream (based on host architecture) to image repository
|
||||
push-image/upstream:
|
||||
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
|
||||
docker push "${UPSTREAM_IMAGE}-${GOARCH}"
|
||||
|
||||
# Push container image of skopeo stable (based on host architecture) to image default and extra repositories
|
||||
push-image/stable:
|
||||
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
|
||||
docker push "${STABLE_IMAGE}-${GOARCH}"
|
||||
docker tag "${STABLE_IMAGE}-${GOARCH}" "${EXTRA_STABLE_IMAGE}-${GOARCH}"
|
||||
$(call quay_login,CONTAINERS_QUAY_USERNAME,CONTAINERS_QUAY_PASSWORD)
|
||||
docker push "${EXTRA_STABLE_IMAGE}-${GOARCH}"
|
||||
|
||||
# Create and push multiarch image manifest of skopeo upstream
|
||||
push-manifest-multiarch/upstream:
|
||||
docker manifest create "${UPSTREAM_IMAGE}" $(foreach arch,${MULTIARCH_MANIFEST_ARCHITECTURES}, ${UPSTREAM_IMAGE}-${arch})
|
||||
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
|
||||
docker manifest push --purge "${UPSTREAM_IMAGE}"
|
||||
|
||||
# Create and push multiarch image manifest of skopeo stable
|
||||
push-manifest-multiarch/stable:
|
||||
docker manifest create "${STABLE_IMAGE}" $(foreach arch,${MULTIARCH_MANIFEST_ARCHITECTURES}, ${STABLE_IMAGE}-${arch})
|
||||
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
|
||||
docker manifest push --purge "${STABLE_IMAGE}"
|
||||
# Push to extra repository
|
||||
docker manifest create "${EXTRA_STABLE_IMAGE}" $(foreach arch,${MULTIARCH_MANIFEST_ARCHITECTURES}, ${EXTRA_STABLE_IMAGE}-${arch})
|
||||
$(call quay_login,CONTAINERS_QUAY_USERNAME,CONTAINERS_QUAY_PASSWORD)
|
||||
docker manifest push --purge "${EXTRA_STABLE_IMAGE}"
|
||||
release/README.md (new file, 40 lines)
@@ -0,0 +1,40 @@
# skopeo container image build with Travis

This document describes the details and requirements for building and publishing skopeo container images. The images are published as several architecture-specific images, with multiarch manifest images on top, for both upstream and stable versions.

The Travis configuration is available at `.travis.yml`.

The code to build and publish images is available at `release/Makefile` and should be used only via Travis.

The Travis workflow has three major pieces:
- `local-build` - builds and tests the source code on osx and linux/amd64 environments; the two jobs run in parallel.
- `image-build-push` - builds and pushes container images, with several Travis jobs running in parallel to build images for several architectures (linux/amd64, linux/s390x, linux/ppc64le). The build part runs for each PR; the push part is executed only for a cron job or a master branch update.
- `manifest-multiarch-push` - creates and pushes the image manifests, which consist of the architecture-specific images from the previous step. Executed only for a cron job or a master branch update.

## Ways to run the full workflow
- [cron job](https://docs.travis-ci.com/user/cron-jobs/#adding-cron-jobs)
- Trigger a build from Travis CI
- Update code in the master branch

## Environment variables

Several environment variables are used to customize image names and to keep the private credentials used to push to quay.io repositories.

**Image tags** are specified in environment variables and should be updated manually for each new release.

- `SKOPEO_QUAY_USERNAME` and `SKOPEO_QUAY_PASSWORD` are credentials to push images to the `quay.io/skopeo/stable` and `quay.io/skopeo/upstream` repos, and require write permissions. These variables should be specified in [Travis](https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings).
- `CONTAINERS_QUAY_USERNAME` and `CONTAINERS_QUAY_PASSWORD` are credentials to push images to the `quay.io/containers/skopeo` repo, and require write permissions. These variables should be specified in [Travis](https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings).

Variables in `.travis.yml`:
- `MULTIARCH_MANIFEST_ARCHITECTURES` is a list of architecture short names to include in the final multiarch manifest. The values should match the architectures used in the `image-build-push` Travis step.
- `STABLE_IMAGE` and `EXTRA_STABLE_IMAGE` are the image names used to publish stable Skopeo.
- `UPSTREAM_IMAGE` is the image name used to publish upstream Skopeo.

### Values for environment variables

| Env variable                     | Value                            |
| -------------------------------- | -------------------------------- |
| MULTIARCH_MANIFEST_ARCHITECTURES | "amd64 s390x ppc64le"            |
| STABLE_IMAGE                     | quay.io/skopeo/stable:v1.2.0     |
| EXTRA_STABLE_IMAGE               | quay.io/containers/skopeo:v1.2.0 |
| UPSTREAM_IMAGE                   | quay.io/skopeo/upstream:master   |
@@ -27,11 +27,20 @@ load helpers
|
||||
# Now run inspect locally
|
||||
run_skopeo inspect dir:$workdir
|
||||
inspect_local=$output
|
||||
run_skopeo inspect --raw dir:$workdir
|
||||
inspect_local_raw=$output
|
||||
config_digest=$(jq -r '.config.digest' <<<"$inspect_local_raw")
|
||||
|
||||
# Each SHA-named file must be listed in the output of 'inspect'
|
||||
# Each SHA-named layer file (but not the config) must be listed in the output of 'inspect'.
|
||||
# In all existing versions of Skopeo (with 1.6 being the current as of this comment),
|
||||
# the output of 'inspect' lists layer digests,
|
||||
# but not the digest of the config blob ($config_digest), if any.
|
||||
layers=$(jq -r '.Layers' <<<"$inspect_local")
|
||||
for sha in $(find $workdir -type f | xargs -l1 basename | egrep '^[0-9a-f]{64}$'); do
|
||||
expect_output --from="$inspect_local" --substring "sha256:$sha" \
|
||||
"Locally-extracted SHA file is present in 'inspect'"
|
||||
if [ "sha256:$sha" != "$config_digest" ]; then
|
||||
expect_output --from="$layers" --substring "sha256:$sha" \
|
||||
"Locally-extracted SHA file is present in 'inspect'"
|
||||
fi
|
||||
done
|
||||
|
||||
# Simple sanity check on 'inspect' output.
|
||||
@@ -65,59 +74,47 @@ END_EXPECT
|
||||
}
|
||||
|
||||
@test "inspect: env" {
|
||||
remote_image=docker://docker.io/fedora:latest
|
||||
remote_image=docker://quay.io/libpod/fedora:31
|
||||
run_skopeo inspect $remote_image
|
||||
inspect_remote=$output
|
||||
|
||||
# Simple check on 'inspect' output with environment variables.
|
||||
# 1) Get remote image values of environment variables (the value of 'Env')
|
||||
# 2) Confirm substring in check_array and the value of 'Env' match.
|
||||
check_array=(PATH=.* )
|
||||
check_array=(FGC=f31 DISTTAG=f31container)
|
||||
remote=$(jq '.Env[]' <<<"$inspect_remote")
|
||||
for substr in ${check_array[@]}; do
|
||||
expect_output --from="$remote" --substring "$substr"
|
||||
done
|
||||
}
|
||||
|
||||
# Tests https://github.com/containers/skopeo/pull/708
|
||||
@test "inspect: image manifest list w/ diff platform" {
|
||||
# When --raw is provided, can inspect show the raw manifest list, w/o
|
||||
# requiring any particular platform to be present
|
||||
# To test whether container image can be inspected successfully w/o
|
||||
# platform dependency.
|
||||
# 1) Get current platform arch
|
||||
# 2) Inspect container image is different from current platform arch
|
||||
# 3) Compare output w/ expected result
|
||||
# This image's manifest is for an os + arch that is... um, unlikely
|
||||
# to support skopeo in the foreseeable future. Or past. The image
|
||||
# is created by the make-noarch-manifest script in this directory.
|
||||
img=docker://quay.io/libpod/notmyarch:20210121
|
||||
|
||||
# Here we see a revolting workaround for a podman incompatibility
|
||||
# change: in April 2020, podman info completely changed format
|
||||
# of the keys. What worked until then now throws an error. We
|
||||
# need to work with both old and new podman.
|
||||
arch=$(podman info --format '{{.host.arch}}' || true)
|
||||
if [[ -z "$arch" ]]; then
|
||||
arch=$(podman info --format '{{.Host.Arch}}')
|
||||
fi
|
||||
# Get our host arch (what we're running on). This assumes that skopeo
|
||||
# arch matches podman; it also assumes running podman >= April 2020
|
||||
# (prior to that, the format keys were lower-case).
|
||||
arch=$(podman info --format '{{.Host.Arch}}')
|
||||
|
||||
case $arch in
|
||||
"amd64")
|
||||
diff_arch_list="s390x ppc64le"
|
||||
;;
|
||||
"s390x")
|
||||
diff_arch_list="amd64 ppc64le"
|
||||
;;
|
||||
"ppc64le")
|
||||
diff_arch_list="amd64 s390x"
|
||||
;;
|
||||
"*")
|
||||
diff_arch_list="amd64 s390x ppc64le"
|
||||
;;
|
||||
esac
|
||||
# By default, 'inspect' tries to match our host os+arch. This should fail.
|
||||
run_skopeo 1 inspect $img
|
||||
expect_output --substring "Error parsing manifest for image: Error choosing image instance: no image found in manifest list for architecture $arch, variant " \
|
||||
"skopeo inspect, without --raw, fails"
|
||||
|
||||
for arch in $diff_arch_list; do
|
||||
remote_image=docker://docker.io/$arch/golang
|
||||
run_skopeo inspect --tls-verify=false --raw $remote_image
|
||||
remote_arch=$(jq -r '.manifests[0]["platform"]["architecture"]' <<< "$output")
|
||||
expect_output --from="$remote_arch" "$arch" "platform arch of $remote_image"
|
||||
done
|
||||
# With --raw, we can inspect
|
||||
run_skopeo inspect --raw $img
|
||||
expect_output --substring "manifests.*platform.*architecture" \
|
||||
"skopeo inspect --raw returns reasonable output"
|
||||
|
||||
# ...and what we get should be consistent with what our script created.
|
||||
archinfo=$(jq -r '.manifests[0].platform | {os,variant,architecture} | join("-")' <<<"$output")
|
||||
|
||||
expect_output --from="$archinfo" "amigaos-1000-mc68000" \
|
||||
"os - variant - architecture of $img"
|
||||
}
|
||||
|
||||
# vim: filetype=sh
|
||||
|
||||
@@ -14,7 +14,7 @@ function setup() {
|
||||
# From remote, to dir1, to local, to dir2;
|
||||
# compare dir1 and dir2, expect no changes
|
||||
@test "copy: dir, round trip" {
|
||||
local remote_image=docker://docker.io/library/busybox:latest
|
||||
local remote_image=docker://quay.io/libpod/busybox:latest
|
||||
local localimg=docker://localhost:5000/busybox:unsigned
|
||||
|
||||
local dir1=$TESTDIR/dir1
|
||||
@@ -30,7 +30,7 @@ function setup() {
|
||||
|
||||
# Same as above, but using 'oci:' instead of 'dir:' and with a :latest tag
|
||||
@test "copy: oci, round trip" {
|
||||
local remote_image=docker://docker.io/library/busybox:latest
|
||||
local remote_image=docker://quay.io/libpod/busybox:latest
|
||||
local localimg=docker://localhost:5000/busybox:unsigned
|
||||
|
||||
local dir1=$TESTDIR/oci1
|
||||
@@ -45,8 +45,8 @@ function setup() {
|
||||
}
|
||||
|
||||
# Compression zstd
|
||||
@test "copy: oci, round trip, zstd" {
|
||||
local remote_image=docker://docker.io/library/busybox:latest
|
||||
@test "copy: oci, zstd" {
|
||||
local remote_image=docker://quay.io/libpod/busybox:latest
|
||||
|
||||
local dir=$TESTDIR/dir
|
||||
|
||||
@@ -57,11 +57,17 @@ function setup() {
|
||||
|
||||
# Check there is at least one file that has the zstd magic number as the first 4 bytes
|
||||
(for i in $dir/blobs/sha256/*; do test "$(head -c 4 $i)" = $magic && exit 0; done; exit 1)
|
||||
|
||||
# Check that the manifest's description of the image's first layer is the zstd layer type
|
||||
instance=$(jq -r '.manifests[0].digest' $dir/index.json)
|
||||
[[ "$instance" != null ]]
|
||||
mediatype=$(jq -r '.layers[0].mediaType' < $dir/blobs/${instance/://})
|
||||
[[ "$mediatype" == "application/vnd.oci.image.layer.v1.tar+zstd" ]]
|
||||
}
|
||||
|
||||
# Same image, extracted once with :tag and once without
|
||||
@test "copy: oci w/ and w/o tags" {
|
||||
local remote_image=docker://docker.io/library/busybox:latest
|
||||
local remote_image=docker://quay.io/libpod/busybox:latest
|
||||
|
||||
local dir1=$TESTDIR/dir1
|
||||
local dir2=$TESTDIR/dir2
|
||||
@@ -78,7 +84,7 @@ function setup() {
|
||||
|
||||
# Registry -> storage -> oci-archive
|
||||
@test "copy: registry -> storage -> oci-archive" {
|
||||
local alpine=docker.io/library/alpine:latest
|
||||
local alpine=quay.io/libpod/alpine:latest
|
||||
local tmp=$TESTDIR/oci
|
||||
|
||||
run_skopeo copy docker://$alpine containers-storage:$alpine
|
||||
@@ -94,6 +100,50 @@ function setup() {
|
||||
docker://localhost:5000/foo
|
||||
}
|
||||
|
||||
# manifest format
|
||||
@test "copy: manifest format" {
|
||||
local remote_image=docker://quay.io/libpod/busybox:latest
|
||||
|
||||
local dir1=$TESTDIR/dir1
|
||||
local dir2=$TESTDIR/dir2
|
||||
|
||||
run_skopeo copy --format v2s2 $remote_image dir:$dir1
|
||||
run_skopeo copy --format oci $remote_image dir:$dir2
|
||||
grep 'application/vnd.docker.distribution.manifest.v2' $dir1/manifest.json
|
||||
grep 'application/vnd.oci.image' $dir2/manifest.json
|
||||
}
|
||||
|
||||
# additional tag
|
||||
@test "copy: additional tag" {
|
||||
local remote_image=docker://quay.io/libpod/busybox:latest
|
||||
|
||||
# additional-tag is supported only for docker-archive
|
||||
run_skopeo copy --additional-tag busybox:mine $remote_image \
|
||||
docker-archive:$TESTDIR/mybusybox.tar:busybox:latest
|
||||
mkdir -p $TESTDIR/podmanroot
|
||||
run podman --root $TESTDIR/podmanroot load -i $TESTDIR/mybusybox.tar
|
||||
run podman --root $TESTDIR/podmanroot images
|
||||
expect_output --substring "mine"
|
||||
|
||||
}
|
||||
|
||||
# shared blob directory
|
||||
@test "copy: shared blob directory" {
|
||||
local remote_image=docker://quay.io/libpod/busybox:latest
|
||||
|
||||
local shareddir=$TESTDIR/shareddir
|
||||
local dir1=$TESTDIR/dir1
|
||||
local dir2=$TESTDIR/dir2
|
||||
|
||||
run_skopeo copy --dest-shared-blob-dir $shareddir \
|
||||
$remote_image oci:$dir1
|
||||
[ -n "$(ls $shareddir)" ]
|
||||
[ -z "$(ls $dir1/blobs)" ]
|
||||
run_skopeo copy --src-shared-blob-dir $shareddir \
|
||||
oci:$dir1 oci:$dir2
|
||||
diff -urN $shareddir $dir2/blobs
|
||||
}
|
||||
|
||||
teardown() {
|
||||
podman rm -f reg
|
||||
|
||||
|
||||
@@ -8,19 +8,28 @@ load helpers
|
||||
function setup() {
|
||||
standard_setup
|
||||
|
||||
start_registry --with-cert reg
|
||||
start_registry --with-cert --enable-delete=true reg
|
||||
}
|
||||
|
||||
@test "local registry, with cert" {
|
||||
# Push to local registry...
|
||||
run_skopeo copy --dest-cert-dir=$TESTDIR/client-auth \
|
||||
docker://docker.io/library/busybox:latest \
|
||||
docker://quay.io/libpod/busybox:latest \
|
||||
docker://localhost:5000/busybox:unsigned
|
||||
|
||||
# ...and pull it back out
|
||||
run_skopeo copy --src-cert-dir=$TESTDIR/client-auth \
|
||||
docker://localhost:5000/busybox:unsigned \
|
||||
dir:$TESTDIR/extracted
|
||||
|
||||
# inspect with cert
|
||||
run_skopeo inspect --cert-dir=$TESTDIR/client-auth \
|
||||
docker://localhost:5000/busybox:unsigned
|
||||
expect_output --substring "localhost:5000/busybox"
|
||||
|
||||
# delete with cert
|
||||
run_skopeo delete --cert-dir=$TESTDIR/client-auth \
|
||||
docker://localhost:5000/busybox:unsigned
|
||||
}
|
||||
|
||||
teardown() {
|
||||
|
||||
@@ -18,7 +18,7 @@ function setup() {
|
||||
testuser=testuser
|
||||
testpassword=$(random_string 15)
|
||||
|
||||
start_registry --testuser=$testuser --testpassword=$testpassword reg
|
||||
start_registry --testuser=$testuser --testpassword=$testpassword --enable-delete=true reg
|
||||
}
|
||||
|
||||
@test "auth: credentials on command line" {
|
||||
@@ -43,7 +43,7 @@ function setup() {
|
||||
|
||||
# These should pass
|
||||
run_skopeo copy --dest-tls-verify=false --dcreds=$testuser:$testpassword \
|
||||
docker://docker.io/library/busybox:latest \
|
||||
docker://quay.io/libpod/busybox:latest \
|
||||
docker://localhost:5000/busybox:mine
|
||||
run_skopeo inspect --tls-verify=false --creds=$testuser:$testpassword \
|
||||
docker://localhost:5000/busybox:mine
|
||||
@@ -55,7 +55,7 @@ function setup() {
|
||||
podman login --tls-verify=false -u $testuser -p $testpassword localhost:5000
|
||||
|
||||
run_skopeo copy --dest-tls-verify=false \
|
||||
docker://docker.io/library/busybox:latest \
|
||||
docker://quay.io/libpod/busybox:latest \
|
||||
docker://localhost:5000/busybox:mine
|
||||
run_skopeo inspect --tls-verify=false docker://localhost:5000/busybox:mine
|
||||
expect_output --substring "localhost:5000/busybox"
|
||||
@@ -67,6 +67,47 @@ function setup() {
|
||||
expect_output --substring "unauthorized: authentication required"
|
||||
}
|
||||
|
||||
@test "auth: copy with --src-creds and --dest-creds" {
|
||||
run_skopeo copy --dest-tls-verify=false --dest-creds=$testuser:$testpassword \
|
||||
docker://quay.io/libpod/busybox:latest \
|
||||
docker://localhost:5000/busybox:mine
|
||||
run_skopeo copy --src-tls-verify=false --src-creds=$testuser:$testpassword \
|
||||
docker://localhost:5000/busybox:mine \
|
||||
dir:$TESTDIR/dir1
|
||||
run ls $TESTDIR/dir1
|
||||
expect_output --substring "manifest.json"
|
||||
}
|
||||
|
||||
@test "auth: credentials via authfile" {
|
||||
podman login --tls-verify=false --authfile $TESTDIR/test.auth -u $testuser -p $testpassword localhost:5000
|
||||
|
||||
# copy without authfile: should fail
|
||||
run_skopeo 1 copy --dest-tls-verify=false \
|
||||
docker://quay.io/libpod/busybox:latest \
|
||||
docker://localhost:5000/busybox:mine
|
||||
|
||||
# copy with authfile: should work
|
||||
run_skopeo copy --dest-tls-verify=false \
|
||||
--authfile $TESTDIR/test.auth \
|
||||
docker://quay.io/libpod/busybox:latest \
|
||||
docker://localhost:5000/busybox:mine
|
||||
|
||||
# inspect without authfile: should fail
|
||||
run_skopeo 1 inspect --tls-verify=false docker://localhost:5000/busybox:mine
|
||||
expect_output --substring "unauthorized: authentication required"
|
||||
|
||||
# inspect with authfile: should work
|
||||
run_skopeo inspect --tls-verify=false --authfile $TESTDIR/test.auth docker://localhost:5000/busybox:mine
|
||||
expect_output --substring "localhost:5000/busybox"
|
||||
|
||||
# delete without authfile: should fail
|
||||
run_skopeo 1 delete --tls-verify=false docker://localhost:5000/busybox:mine
|
||||
expect_output --substring "authentication required"
|
||||
|
||||
# delete with authfile: should work
|
||||
run_skopeo delete --tls-verify=false --authfile $TESTDIR/test.auth docker://localhost:5000/busybox:mine
|
||||
}
|
||||
|
||||
teardown() {
|
||||
podman rm -f reg
|
||||
|
||||
|
||||
@@ -92,7 +92,7 @@ END_POLICY_JSON
|
||||
fi
|
||||
|
||||
# Cache local copy
|
||||
run_skopeo copy docker://docker.io/library/busybox:latest \
|
||||
run_skopeo copy docker://quay.io/libpod/busybox:latest \
|
||||
dir:$TESTDIR/busybox
|
||||
|
||||
# Push a bunch of images. Do so *without* --policy flag; this lets us
|
||||
@@ -143,6 +143,75 @@ END_PUSH
|
||||
END_TESTS
|
||||
}
|
||||
|
||||
@test "signing: remove signature" {
|
||||
run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null
|
||||
if [[ "$output" =~ 'signing is not supported' ]]; then
|
||||
skip "skopeo built without support for creating signatures"
|
||||
return 1
|
||||
fi
|
||||
if [ "$status" -ne 0 ]; then
|
||||
die "exit code is $status; expected 0"
|
||||
fi
|
||||
|
||||
# Cache local copy
|
||||
run_skopeo copy docker://quay.io/libpod/busybox:latest \
|
||||
dir:$TESTDIR/busybox
|
||||
# Push a signed image
|
||||
run_skopeo --registries.d $REGISTRIES_D \
|
||||
copy --dest-tls-verify=false \
|
||||
--sign-by=alice@test.redhat.com \
|
||||
dir:$TESTDIR/busybox \
|
||||
docker://localhost:5000/myns/alice:signed
|
||||
# Fetch the image with signature
|
||||
run_skopeo --registries.d $REGISTRIES_D \
|
||||
--policy $POLICY_JSON \
|
||||
copy --src-tls-verify=false \
|
||||
docker://localhost:5000/myns/alice:signed \
|
||||
dir:$TESTDIR/busybox-signed
|
||||
# Fetch the image with removing signature
|
||||
run_skopeo --registries.d $REGISTRIES_D \
|
||||
--policy $POLICY_JSON \
|
||||
copy --src-tls-verify=false \
|
||||
--remove-signatures \
|
||||
docker://localhost:5000/myns/alice:signed \
|
||||
dir:$TESTDIR/busybox-unsigned
|
||||
ls $TESTDIR/busybox-signed | grep "signature"
|
||||
[ -z "$(ls $TESTDIR/busybox-unsigned | grep "signature")" ]
|
||||
}
|
||||
|
||||
@test "signing: standalone" {
|
||||
run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null
|
||||
if [[ "$output" =~ 'signing is not supported' ]]; then
|
||||
skip "skopeo built without support for creating signatures"
|
||||
return 1
|
||||
fi
|
||||
if [ "$status" -ne 0 ]; then
|
||||
die "exit code is $status; expected 0"
|
||||
fi
|
||||
|
||||
run_skopeo copy --dest-tls-verify=false \
|
||||
docker://quay.io/libpod/busybox:latest \
|
||||
docker://localhost:5000/busybox:latest
|
||||
run_skopeo copy --src-tls-verify=false \
|
||||
docker://localhost:5000/busybox:latest \
|
||||
dir:$TESTDIR/busybox
|
||||
# Standalone sign
|
||||
run_skopeo standalone-sign -o $TESTDIR/busybox.signature \
|
||||
$TESTDIR/busybox/manifest.json \
|
||||
localhost:5000/busybox:latest \
|
||||
alice@test.redhat.com
|
||||
# Standalone verify
|
||||
fingerprint=$(gpg --list-keys | grep -B1 alice.test.redhat.com | head -n 1)
|
||||
run_skopeo standalone-verify $TESTDIR/busybox/manifest.json \
|
||||
localhost:5000/busybox:latest \
|
||||
$fingerprint \
|
||||
$TESTDIR/busybox.signature
|
||||
# manifest digest
|
||||
digest=$(echo "$output" | awk '{print $4;}')
|
||||
run_skopeo manifest-digest $TESTDIR/busybox/manifest.json
|
||||
expect_output $digest
|
||||
}
|
||||
|
||||
teardown() {
|
||||
podman rm -f reg
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ function setup() {
|
||||
|
||||
# delete image from registry
|
||||
@test "delete: remove image from registry" {
|
||||
local remote_image=docker://docker.io/library/busybox:latest
|
||||
local remote_image=docker://quay.io/libpod/busybox:latest
|
||||
local localimg=docker://localhost:5000/busybox:unsigned
|
||||
local output=
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ SKOPEO_BINARY=${SKOPEO_BINARY:-$(dirname ${BASH_SOURCE})/../skopeo}
|
||||
SKOPEO_TIMEOUT=${SKOPEO_TIMEOUT:-300}
|
||||
|
||||
# Default image to run as a local registry
|
||||
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-docker.io/library/registry:2}
|
||||
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-quay.io/libpod/registry:2}
|
||||
|
||||
###############################################################################
|
||||
# BEGIN setup/teardown
|
||||
@@ -194,7 +194,7 @@ function expect_output() {
|
||||
fi
|
||||
|
||||
# This is a multi-line message, which may in turn contain multi-line
|
||||
# output, so let's format it ourself, readably
|
||||
# output, so let's format it ourselves, readably
|
||||
local -a actual_split
|
||||
readarray -t actual_split <<<"$actual"
|
||||
printf "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n" >&2
|
||||
@@ -331,7 +331,8 @@ start_registry() {
|
||||
log_and_run openssl req -newkey rsa:4096 -nodes -sha256 \
|
||||
-keyout $AUTHDIR/domain.key -x509 -days 2 \
|
||||
-out $CERT \
|
||||
-subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
|
||||
-subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=registry host certificate" \
|
||||
-addext subjectAltName=DNS:localhost
|
||||
fi
|
||||
|
||||
reg_args+=(
|
||||
@@ -355,7 +356,7 @@ start_registry() {
|
||||
return
|
||||
fi
|
||||
|
||||
timeout=$(expr $timeout - 1)
|
||||
timeout=$(( timeout - 1 ))
|
||||
sleep 1
|
||||
done
|
||||
die "Timed out waiting for registry container to respond on :$port"
|
||||
|
||||
systemtest/make-noarch-manifest (new executable file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# Tool for creating an image whose OS and arch will (probably) never
|
||||
# match a system on which skopeo will run. This image will be used
|
||||
# in the 'inspect' test.
|
||||
#
|
||||
set -ex
|
||||
|
||||
# Name and tag of the image we create
|
||||
imgname=notmyarch
|
||||
imgtag=$(date +%Y%m%d)
|
||||
|
||||
# (In case older image exists from a prior run)
|
||||
buildah rmi $imgname:$imgtag &>/dev/null || true
|
||||
|
||||
#
|
||||
# Step 1: create an image containing only a README and a copy of this script
|
||||
#
|
||||
id=$(buildah from scratch)
|
||||
|
||||
now=$(date --rfc-3339=seconds)
|
||||
readme=$(mktemp -t README.XXXXXXXX)
|
||||
ME=$(basename $0)
|
||||
|
||||
cat >| $readme <<EOF
|
||||
This is a dummy image intended solely for skopeo testing.
|
||||
|
||||
This image was created $now
|
||||
|
||||
The script used to create this image is available as $ME
|
||||
EOF
|
||||
|
||||
buildah copy $id $readme /README
|
||||
buildah copy $id $0 /$ME
|
||||
|
||||
buildah commit $id my_tmp_image
|
||||
buildah rm $id
|
||||
|
||||
#
|
||||
# Step 2: create a manifest list, then add the above image but with
|
||||
# an os+arch override.
|
||||
#
|
||||
buildah manifest create $imgname:$imgtag
|
||||
|
||||
buildah manifest add \
|
||||
--os amigaos \
|
||||
--arch mc68000 \
|
||||
--variant 1000 \
|
||||
$imgname:$imgtag my_tmp_image
|
||||
|
||||
# Done. Show instructions.
|
||||
cat <<EOF
|
||||
DONE!
|
||||
|
||||
You can inspect the created image with:
|
||||
|
||||
skopeo inspect --raw containers-storage:localhost/$imgname:$imgtag | jq .
|
||||
|
||||
(FIXME: is there a way to, like, mount the image and verify the files?)
|
||||
|
||||
If you're happy with this image, you can now:
|
||||
|
||||
buildah manifest push --all $imgname:$imgtag docker://quay.io/libpod/$imgname:$imgtag
|
||||
|
||||
Once done, you urgently need to:
|
||||
|
||||
buildah rmi $imgname:$imgtag my_tmp_image
|
||||
|
||||
If you don't do this, 'podman images' will barf catastrophically!
|
||||
EOF
|
||||
vendor/github.com/Microsoft/go-winio/CODEOWNERS (new file, generated, vendored, 1 line)
@@ -0,0 +1 @@
|
||||
* @microsoft/containerplat
|
||||
vendor/github.com/Microsoft/go-winio/README.md (generated, vendored, 2 lines)
@@ -1,4 +1,4 @@
|
||||
# go-winio
|
||||
# go-winio [](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)
|
||||
|
||||
This repository contains utilities for efficiently performing Win32 IO operations in
|
||||
Go. Currently, this is focused on accessing named pipes and other file handles, and
|
||||
|
||||
vendor/github.com/Microsoft/go-winio/archive/tar/common.go (generated, vendored, 344 lines)
@@ -1,344 +0,0 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package tar implements access to tar archives.
|
||||
// It aims to cover most of the variations, including those produced
|
||||
// by GNU and BSD tars.
|
||||
//
|
||||
// References:
|
||||
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
|
||||
// http://www.gnu.org/software/tar/manual/html_node/Standard.html
|
||||
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
|
||||
package tar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
blockSize = 512
|
||||
|
||||
// Types
|
||||
TypeReg = '0' // regular file
|
||||
TypeRegA = '\x00' // regular file
|
||||
TypeLink = '1' // hard link
|
||||
TypeSymlink = '2' // symbolic link
|
||||
TypeChar = '3' // character device node
|
||||
TypeBlock = '4' // block device node
|
||||
TypeDir = '5' // directory
|
||||
TypeFifo = '6' // fifo node
|
||||
TypeCont = '7' // reserved
|
||||
TypeXHeader = 'x' // extended header
|
||||
TypeXGlobalHeader = 'g' // global extended header
|
||||
TypeGNULongName = 'L' // Next file has a long name
|
||||
TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
|
||||
TypeGNUSparse = 'S' // sparse file
|
||||
)
|
||||
|
||||
// A Header represents a single header in a tar archive.
|
||||
// Some fields may not be populated.
|
||||
type Header struct {
|
||||
Name string // name of header file entry
|
||||
Mode int64 // permission and mode bits
|
||||
Uid int // user id of owner
|
||||
Gid int // group id of owner
|
||||
Size int64 // length in bytes
|
||||
ModTime time.Time // modified time
|
||||
Typeflag byte // type of header entry
|
||||
Linkname string // target name of link
|
||||
Uname string // user name of owner
|
||||
Gname string // group name of owner
|
||||
Devmajor int64 // major number of character or block device
|
||||
Devminor int64 // minor number of character or block device
|
||||
AccessTime time.Time // access time
|
||||
ChangeTime time.Time // status change time
|
||||
CreationTime time.Time // creation time
|
||||
Xattrs map[string]string
|
||||
Winheaders map[string]string
|
||||
}
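// Illustrative only (not part of the original file): a Header can be
// constructed directly, and FileInfo() then exposes it through the standard
// os.FileInfo interface. For a non-directory entry, Name() returns only the
// base name, e.g.
//
//	hdr := &Header{Name: "etc/passwd", Mode: 0644, Size: 1024, ModTime: time.Now(), Typeflag: TypeReg}
//	fi := hdr.FileInfo()
//	_ = fi.Name() // "passwd"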
|
||||
|
||||
// File name constants from the tar spec.
|
||||
const (
|
||||
fileNameSize = 100 // Maximum number of bytes in a standard tar name.
|
||||
fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
|
||||
)
|
||||
|
||||
// FileInfo returns an os.FileInfo for the Header.
|
||||
func (h *Header) FileInfo() os.FileInfo {
|
||||
return headerFileInfo{h}
|
||||
}
|
||||
|
||||
// headerFileInfo implements os.FileInfo.
|
||||
type headerFileInfo struct {
|
||||
h *Header
|
||||
}
|
||||
|
||||
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
|
||||
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
|
||||
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
|
||||
func (fi headerFileInfo) Sys() interface{} { return fi.h }
|
||||
|
||||
// Name returns the base name of the file.
|
||||
func (fi headerFileInfo) Name() string {
|
||||
if fi.IsDir() {
|
||||
return path.Base(path.Clean(fi.h.Name))
|
||||
}
|
||||
return path.Base(fi.h.Name)
|
||||
}
|
||||
|
||||
// Mode returns the permission and mode bits for the headerFileInfo.
|
||||
func (fi headerFileInfo) Mode() (mode os.FileMode) {
|
||||
// Set file permission bits.
|
||||
mode = os.FileMode(fi.h.Mode).Perm()
|
||||
|
||||
// Set setuid, setgid and sticky bits.
|
||||
if fi.h.Mode&c_ISUID != 0 {
|
||||
// setuid
|
||||
mode |= os.ModeSetuid
|
||||
}
|
||||
if fi.h.Mode&c_ISGID != 0 {
|
||||
// setgid
|
||||
mode |= os.ModeSetgid
|
||||
}
|
||||
if fi.h.Mode&c_ISVTX != 0 {
|
||||
// sticky
|
||||
mode |= os.ModeSticky
|
||||
}
|
||||
|
||||
// Set file mode bits.
|
||||
// clear perm, setuid, setgid and sticky bits.
|
||||
m := os.FileMode(fi.h.Mode) &^ 07777
|
||||
if m == c_ISDIR {
|
||||
// directory
|
||||
mode |= os.ModeDir
|
||||
}
|
||||
if m == c_ISFIFO {
|
||||
// named pipe (FIFO)
|
||||
mode |= os.ModeNamedPipe
|
||||
}
|
||||
if m == c_ISLNK {
|
||||
// symbolic link
|
||||
mode |= os.ModeSymlink
|
||||
}
|
||||
if m == c_ISBLK {
|
||||
// device file
|
||||
mode |= os.ModeDevice
|
||||
}
|
||||
if m == c_ISCHR {
|
||||
// Unix character device
|
||||
mode |= os.ModeDevice
|
||||
mode |= os.ModeCharDevice
|
||||
}
|
||||
if m == c_ISSOCK {
|
||||
// Unix domain socket
|
||||
mode |= os.ModeSocket
|
||||
}
|
||||
|
||||
switch fi.h.Typeflag {
|
||||
case TypeSymlink:
|
||||
// symbolic link
|
||||
mode |= os.ModeSymlink
|
||||
case TypeChar:
|
||||
// character device node
|
||||
mode |= os.ModeDevice
|
||||
mode |= os.ModeCharDevice
|
||||
case TypeBlock:
|
||||
// block device node
|
||||
mode |= os.ModeDevice
|
||||
case TypeDir:
|
||||
// directory
|
||||
mode |= os.ModeDir
|
||||
case TypeFifo:
|
||||
// fifo node
|
||||
mode |= os.ModeNamedPipe
|
||||
}
|
||||
|
||||
return mode
|
||||
}
|
||||
|
||||
// sysStat, if non-nil, populates h from system-dependent fields of fi.
|
||||
var sysStat func(fi os.FileInfo, h *Header) error
|
||||
|
||||
// Mode constants from the tar spec.
|
||||
const (
|
||||
c_ISUID = 04000 // Set uid
|
||||
c_ISGID = 02000 // Set gid
|
||||
c_ISVTX = 01000 // Save text (sticky bit)
|
||||
c_ISDIR = 040000 // Directory
|
||||
c_ISFIFO = 010000 // FIFO
|
||||
c_ISREG = 0100000 // Regular file
|
||||
c_ISLNK = 0120000 // Symbolic link
|
||||
c_ISBLK = 060000 // Block special file
|
||||
c_ISCHR = 020000 // Character special file
|
||||
c_ISSOCK = 0140000 // Socket
|
||||
)
|
||||
|
||||
// Keywords for the PAX Extended Header
|
||||
const (
|
||||
paxAtime = "atime"
|
||||
paxCharset = "charset"
|
||||
paxComment = "comment"
|
||||
paxCtime = "ctime" // please note that ctime is not a valid pax header.
|
||||
paxCreationTime = "LIBARCHIVE.creationtime"
|
||||
paxGid = "gid"
|
||||
paxGname = "gname"
|
||||
paxLinkpath = "linkpath"
|
||||
paxMtime = "mtime"
|
||||
paxPath = "path"
|
||||
paxSize = "size"
|
||||
paxUid = "uid"
|
||||
paxUname = "uname"
|
||||
paxXattr = "SCHILY.xattr."
|
||||
paxWindows = "MSWINDOWS."
|
||||
paxNone = ""
|
||||
)
|
||||
|
||||
// FileInfoHeader creates a partially-populated Header from fi.
|
||||
// If fi describes a symlink, FileInfoHeader records link as the link target.
|
||||
// If fi describes a directory, a slash is appended to the name.
|
||||
// Because os.FileInfo's Name method returns only the base name of
|
||||
// the file it describes, it may be necessary to modify the Name field
|
||||
// of the returned header to provide the full path name of the file.
|
||||
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
|
||||
if fi == nil {
|
||||
return nil, errors.New("tar: FileInfo is nil")
|
||||
}
|
||||
fm := fi.Mode()
|
||||
h := &Header{
|
||||
Name: fi.Name(),
|
||||
ModTime: fi.ModTime(),
|
||||
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
|
||||
}
|
||||
switch {
|
||||
case fm.IsRegular():
|
||||
h.Mode |= c_ISREG
|
||||
h.Typeflag = TypeReg
|
||||
h.Size = fi.Size()
|
||||
case fi.IsDir():
|
||||
h.Typeflag = TypeDir
|
||||
h.Mode |= c_ISDIR
|
||||
h.Name += "/"
|
||||
case fm&os.ModeSymlink != 0:
|
||||
h.Typeflag = TypeSymlink
|
||||
h.Mode |= c_ISLNK
|
||||
h.Linkname = link
|
||||
case fm&os.ModeDevice != 0:
|
||||
if fm&os.ModeCharDevice != 0 {
|
||||
h.Mode |= c_ISCHR
|
||||
h.Typeflag = TypeChar
|
||||
} else {
|
||||
h.Mode |= c_ISBLK
|
||||
h.Typeflag = TypeBlock
|
||||
}
|
||||
case fm&os.ModeNamedPipe != 0:
|
||||
h.Typeflag = TypeFifo
|
||||
h.Mode |= c_ISFIFO
|
||||
case fm&os.ModeSocket != 0:
|
||||
h.Mode |= c_ISSOCK
|
||||
default:
|
||||
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
|
||||
}
|
||||
if fm&os.ModeSetuid != 0 {
|
||||
h.Mode |= c_ISUID
|
||||
}
|
||||
if fm&os.ModeSetgid != 0 {
|
||||
h.Mode |= c_ISGID
|
||||
}
|
||||
if fm&os.ModeSticky != 0 {
|
||||
h.Mode |= c_ISVTX
|
||||
}
|
||||
// If possible, populate additional fields from OS-specific
|
||||
// FileInfo fields.
|
||||
if sys, ok := fi.Sys().(*Header); ok {
|
||||
// This FileInfo came from a Header (not the OS). Use the
|
||||
// original Header to populate all remaining fields.
|
||||
h.Uid = sys.Uid
|
||||
h.Gid = sys.Gid
|
||||
h.Uname = sys.Uname
|
||||
h.Gname = sys.Gname
|
||||
h.AccessTime = sys.AccessTime
|
||||
h.ChangeTime = sys.ChangeTime
|
||||
if sys.Xattrs != nil {
|
||||
h.Xattrs = make(map[string]string)
|
||||
for k, v := range sys.Xattrs {
|
||||
h.Xattrs[k] = v
|
||||
}
|
||||
}
|
||||
if sys.Typeflag == TypeLink {
|
||||
// hard link
|
||||
h.Typeflag = TypeLink
|
||||
h.Size = 0
|
||||
h.Linkname = sys.Linkname
|
||||
}
|
||||
}
|
||||
if sysStat != nil {
|
||||
return h, sysStat(fi, h)
|
||||
}
|
||||
return h, nil
|
||||
}
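// Illustrative only (not part of the original file): typical use of
// FileInfoHeader, with the caller fixing up Name to a full path as the doc
// comment above notes (the path below is made up):
//
//	fi, err := os.Stat("/tmp/data/file.txt")
//	if err != nil {
//		return err
//	}
//	hdr, err := FileInfoHeader(fi, "")
//	if err != nil {
//		return err
//	}
//	hdr.Name = "data/file.txt" // FileInfoHeader records only the base name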
|
||||
|
||||
var zeroBlock = make([]byte, blockSize)
|
||||
|
||||
// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
|
||||
// We compute and return both.
|
||||
func checksum(header []byte) (unsigned int64, signed int64) {
|
||||
for i := 0; i < len(header); i++ {
|
||||
if i == 148 {
|
||||
// The chksum field (header[148:156]) is special: it should be treated as space bytes.
|
||||
unsigned += ' ' * 8
|
||||
signed += ' ' * 8
|
||||
i += 7
|
||||
continue
|
||||
}
|
||||
unsigned += int64(header[i])
|
||||
signed += int64(int8(header[i]))
|
||||
}
|
||||
return
|
||||
}
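// Illustrative only (an assumption, since the reader is not shown in this
// excerpt): a tar reader would typically accept a header block when the value
// stored in its chksum field matches either sum, along the lines of
//
//	unsigned, signed := checksum(block)
//	ok := storedChksum == unsigned || storedChksum == signed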
|
||||
|
||||
type slicer []byte
|
||||
|
||||
func (sp *slicer) next(n int) (b []byte) {
|
||||
s := *sp
|
||||
b, *sp = s[0:n], s[n:]
|
||||
return
|
||||
}
|
||||
|
||||
func isASCII(s string) bool {
|
||||
for _, c := range s {
|
||||
if c >= 0x80 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func toASCII(s string) string {
|
||||
if isASCII(s) {
|
||||
return s
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
for _, c := range s {
|
||||
if c < 0x80 {
|
||||
buf.WriteByte(byte(c))
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// isHeaderOnlyType checks if the given type flag is of the type that has no
|
||||
// data section even if a size is specified.
|
||||
func isHeaderOnlyType(flag byte) bool {
|
||||
switch flag {
|
||||
case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
vendor/github.com/Microsoft/go-winio/archive/tar/reader.go (generated, vendored, 1002 lines; file diff suppressed because it is too large)
vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go (generated, vendored, 20 lines)
@@ -1,20 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux dragonfly openbsd solaris
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func statAtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Atim.Unix())
|
||||
}
|
||||
|
||||
func statCtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Ctim.Unix())
|
||||
}
|
||||
vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go (generated, vendored, 20 lines)
@@ -1,20 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin freebsd netbsd
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func statAtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Atimespec.Unix())
|
||||
}
|
||||
|
||||
func statCtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Ctimespec.Unix())
|
||||
}
|
||||
vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go (generated, vendored, 32 lines)
@@ -1,32 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func init() {
|
||||
sysStat = statUnix
|
||||
}
|
||||
|
||||
func statUnix(fi os.FileInfo, h *Header) error {
|
||||
sys, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
h.Uid = int(sys.Uid)
|
||||
h.Gid = int(sys.Gid)
|
||||
// TODO(bradfitz): populate username & group. os/user
|
||||
// doesn't cache LookupId lookups, and lacks group
|
||||
// lookup functions.
|
||||
h.AccessTime = statAtime(sys)
|
||||
h.ChangeTime = statCtime(sys)
|
||||
// TODO(bradfitz): major/minor device numbers?
|
||||
return nil
|
||||
}
|
||||
vendor/github.com/Microsoft/go-winio/archive/tar/writer.go (generated, vendored, 444 lines)
@@ -1,444 +0,0 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
// TODO(dsymonds):
|
||||
// - catch more errors (no first header, etc.)
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrWriteTooLong = errors.New("archive/tar: write too long")
|
||||
ErrFieldTooLong = errors.New("archive/tar: header field too long")
|
||||
ErrWriteAfterClose = errors.New("archive/tar: write after close")
|
||||
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
|
||||
)
|
||||
|
||||
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
|
||||
// A tar archive consists of a sequence of files.
|
||||
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
|
||||
// writing at most hdr.Size bytes in total.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
err error
|
||||
nb int64 // number of unwritten bytes for current file entry
|
||||
pad int64 // amount of padding to write after current file entry
|
||||
closed bool
|
||||
usedBinary bool // whether the binary numeric field extension was used
|
||||
preferPax bool // use pax header instead of binary numeric header
|
||||
hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
|
||||
paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
|
||||
}
|
||||
|
||||
type formatter struct {
|
||||
err error // Last error seen
|
||||
}
|
||||
|
||||
// NewWriter creates a new Writer writing to w.
|
||||
func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} }
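// Illustrative only (not part of the original file): minimal use of the
// Writer API described above. WriteHeader begins an entry, Write supplies at
// most hdr.Size bytes of data, and Flush pads out the final block.
//
//	var buf bytes.Buffer
//	tw := NewWriter(&buf)
//	hdr := &Header{Name: "hello.txt", Mode: 0644, Size: 5, ModTime: time.Now(), Typeflag: TypeReg}
//	if err := tw.WriteHeader(hdr); err != nil {
//		return err
//	}
//	if _, err := tw.Write([]byte("hello")); err != nil {
//		return err
//	}
//	if err := tw.Flush(); err != nil {
//		return err
//	}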
|
||||
|
||||
// Flush finishes writing the current file (optional).
|
||||
func (tw *Writer) Flush() error {
|
||||
if tw.nb > 0 {
|
||||
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
|
||||
return tw.err
|
||||
}
|
||||
|
||||
n := tw.nb + tw.pad
|
||||
for n > 0 && tw.err == nil {
|
||||
nr := n
|
||||
if nr > blockSize {
|
||||
nr = blockSize
|
||||
}
|
||||
var nw int
|
||||
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
|
||||
n -= int64(nw)
|
||||
}
|
||||
tw.nb = 0
|
||||
tw.pad = 0
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// Write s into b, terminating it with a NUL if there is room.
|
||||
func (f *formatter) formatString(b []byte, s string) {
|
||||
if len(s) > len(b) {
|
||||
f.err = ErrFieldTooLong
|
||||
return
|
||||
}
|
||||
ascii := toASCII(s)
|
||||
copy(b, ascii)
|
||||
if len(ascii) < len(b) {
|
||||
b[len(ascii)] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Encode x as an octal ASCII string and write it into b with leading zeros.
|
||||
func (f *formatter) formatOctal(b []byte, x int64) {
|
||||
s := strconv.FormatInt(x, 8)
|
||||
// leading zeros, but leave room for a NUL.
|
||||
for len(s)+1 < len(b) {
|
||||
s = "0" + s
|
||||
}
|
||||
f.formatString(b, s)
|
||||
}
|
||||
|
||||
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
|
||||
// encoding. Unlike octal encoding, base-256 encoding does not require that the
|
||||
// string ends with a NUL character. Thus, all n bytes are available for output.
|
||||
//
|
||||
// If operating in binary mode, this assumes strict GNU binary mode; which means
|
||||
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
|
||||
// equivalent to the sign bit in two's complement form.
|
||||
func fitsInBase256(n int, x int64) bool {
|
||||
var binBits = uint(n-1) * 8
|
||||
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
|
||||
}
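// Illustrative only (worked from the definition above): with n == 8 header
// bytes there are (8-1)*8 = 56 bits of magnitude available, so
//
//	fitsInBase256(8, 1<<55)  // true:  1<<55 < 1<<56
//	fitsInBase256(8, 1<<60)  // false: would need more than 56 bits
//	fitsInBase256(12, 1<<60) // true:  n >= 9 always fits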
|
||||
|
||||
// Write x into b, as binary (GNUtar/star extension).
|
||||
func (f *formatter) formatNumeric(b []byte, x int64) {
|
||||
if fitsInBase256(len(b), x) {
|
||||
for i := len(b) - 1; i >= 0; i-- {
|
||||
b[i] = byte(x)
|
||||
x >>= 8
|
||||
}
|
||||
b[0] |= 0x80 // Highest bit indicates binary format
|
||||
return
|
||||
}
|
||||
|
||||
f.formatOctal(b, 0) // Last resort, just write zero
|
||||
f.err = ErrFieldTooLong
|
||||
}
|
||||
|
||||
var (
|
||||
minTime = time.Unix(0, 0)
|
||||
// There is room for 11 octal digits (33 bits) of mtime.
|
||||
maxTime = minTime.Add((1<<33 - 1) * time.Second)
|
||||
)
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
||||
// WriteHeader calls Flush if it is not the first header.
|
||||
// Calling after a Close will return ErrWriteAfterClose.
|
||||
func (tw *Writer) WriteHeader(hdr *Header) error {
|
||||
return tw.writeHeader(hdr, true)
|
||||
}
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
||||
// WriteHeader calls Flush if it is not the first header.
|
||||
// Calling after a Close will return ErrWriteAfterClose.
|
||||
// As this method is called internally by writePax header to allow it to
|
||||
// suppress writing the pax header.
|
||||
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
|
||||
if tw.closed {
|
||||
return ErrWriteAfterClose
|
||||
}
|
||||
if tw.err == nil {
|
||||
tw.Flush()
|
||||
}
|
||||
if tw.err != nil {
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// a map to hold pax header records, if any are needed
|
||||
paxHeaders := make(map[string]string)
|
||||
|
||||
// TODO(shanemhansen): we might want to use PAX headers for
|
||||
// subsecond time resolution, but for now let's just capture
|
||||
// too long fields or non ascii characters
|
||||
|
||||
var f formatter
|
||||
var header []byte
|
||||
|
||||
// We need to select which scratch buffer to use carefully,
|
||||
// since this method is called recursively to write PAX headers.
|
||||
// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
|
||||
// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
|
||||
// already being used by the non-recursive call, so we must use paxHdrBuff.
|
||||
header = tw.hdrBuff[:]
|
||||
if !allowPax {
|
||||
header = tw.paxHdrBuff[:]
|
||||
}
|
||||
copy(header, zeroBlock)
|
||||
s := slicer(header)
|
||||
|
||||
// Wrappers around formatter that automatically sets paxHeaders if the
|
||||
// argument extends beyond the capacity of the input byte slice.
|
||||
var formatString = func(b []byte, s string, paxKeyword string) {
|
||||
needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
|
||||
if needsPaxHeader {
|
||||
paxHeaders[paxKeyword] = s
|
||||
return
|
||||
}
|
||||
f.formatString(b, s)
|
||||
}
|
||||
var formatNumeric = func(b []byte, x int64, paxKeyword string) {
|
||||
// Try octal first.
|
||||
s := strconv.FormatInt(x, 8)
|
||||
if len(s) < len(b) {
|
||||
f.formatOctal(b, x)
|
||||
return
|
}

// If it is too long for octal, and PAX is preferred, use a PAX header.
if paxKeyword != paxNone && tw.preferPax {
	f.formatOctal(b, 0)
	s := strconv.FormatInt(x, 10)
	paxHeaders[paxKeyword] = s
	return
}

tw.usedBinary = true
f.formatNumeric(b, x)
}

var formatTime = func(b []byte, t time.Time, paxKeyword string) {
	var unixTime int64
	if !t.Before(minTime) && !t.After(maxTime) {
		unixTime = t.Unix()
	}
	formatNumeric(b, unixTime, paxNone)

	// Write a PAX header if the time didn't fit precisely.
	if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) {
		paxHeaders[paxKeyword] = formatPAXTime(t)
	}
}

// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
pathHeaderBytes := s.next(fileNameSize)

formatString(pathHeaderBytes, hdr.Name, paxPath)

f.formatOctal(s.next(8), hdr.Mode)               // 100:108
formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
formatNumeric(s.next(12), hdr.Size, paxSize)     // 124:136
formatTime(s.next(12), hdr.ModTime, paxMtime)    // 136:148
s.next(8)                                        // chksum (148:156)
s.next(1)[0] = hdr.Typeflag                      // 156:157

formatString(s.next(100), hdr.Linkname, paxLinkpath)

copy(s.next(8), []byte("ustar\x0000"))          // 257:265
formatString(s.next(32), hdr.Uname, paxUname)   // 265:297
formatString(s.next(32), hdr.Gname, paxGname)   // 297:329
formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345

// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
prefixHeaderBytes := s.next(155)
formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix

// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
if tw.usedBinary {
	copy(header[257:265], []byte("ustar  \x00"))
}

_, paxPathUsed := paxHeaders[paxPath]
// try to use a ustar header when only the name is too long
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
	prefix, suffix, ok := splitUSTARPath(hdr.Name)
	if ok {
		// Since we can encode in USTAR format, disable PAX header.
		delete(paxHeaders, paxPath)

		// Update the path fields
		formatString(pathHeaderBytes, suffix, paxNone)
		formatString(prefixHeaderBytes, prefix, paxNone)
	}
}

// The chksum field is terminated by a NUL and a space.
// This is different from the other octal fields.
chksum, _ := checksum(header)
f.formatOctal(header[148:155], chksum) // Never fails
header[155] = ' '

// Check if there were any formatting errors.
if f.err != nil {
	tw.err = f.err
	return tw.err
}

if allowPax {
	if !hdr.AccessTime.IsZero() {
		paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime)
	}
	if !hdr.ChangeTime.IsZero() {
		paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime)
	}
	if !hdr.CreationTime.IsZero() {
		paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime)
	}
	for k, v := range hdr.Xattrs {
		paxHeaders[paxXattr+k] = v
	}
	for k, v := range hdr.Winheaders {
		paxHeaders[paxWindows+k] = v
	}
}

if len(paxHeaders) > 0 {
	if !allowPax {
		return errInvalidHeader
	}
	if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
		return err
	}
}
tw.nb = int64(hdr.Size)
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize

_, tw.err = tw.w.Write(header)
return tw.err
}

func formatPAXTime(t time.Time) string {
	sec := t.Unix()
	usec := t.Nanosecond()
	s := strconv.FormatInt(sec, 10)
	if usec != 0 {
		s = fmt.Sprintf("%s.%09d", s, usec)
	}
	return s
}

// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
	length := len(name)
	if length <= fileNameSize || !isASCII(name) {
		return "", "", false
	} else if length > fileNamePrefixSize+1 {
		length = fileNamePrefixSize + 1
	} else if name[length-1] == '/' {
		length--
	}

	i := strings.LastIndex(name[:length], "/")
	nlen := len(name) - i - 1 // nlen is length of suffix
	plen := i                 // plen is length of prefix
	if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
		return "", "", false
	}
	return name[:i], name[i+1:], true
}

// writePaxHeader writes an extended pax header to the
// archive.
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
	// Prepare extended header
	ext := new(Header)
	ext.Typeflag = TypeXHeader
	// Setting ModTime is required for reader parsing to
	// succeed, and seems harmless enough.
	ext.ModTime = hdr.ModTime
	// The spec asks that we namespace our pseudo files
	// with the current pid. However, this results in differing outputs
	// for identical inputs. As such, the constant 0 is now used instead.
	// golang.org/issue/12358
	dir, file := path.Split(hdr.Name)
	fullName := path.Join(dir, "PaxHeaders.0", file)

	ascii := toASCII(fullName)
	if len(ascii) > 100 {
		ascii = ascii[:100]
	}
	ext.Name = ascii
	// Construct the body
	var buf bytes.Buffer

	// Keys are sorted before writing to body to allow deterministic output.
	var keys []string
	for k := range paxHeaders {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
	}

	ext.Size = int64(len(buf.Bytes()))
	if err := tw.writeHeader(ext, false); err != nil {
		return err
	}
	if _, err := tw.Write(buf.Bytes()); err != nil {
		return err
	}
	if err := tw.Flush(); err != nil {
		return err
	}
	return nil
}

// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
func formatPAXRecord(k, v string) string {
	const padding = 3 // Extra padding for ' ', '=', and '\n'
	size := len(k) + len(v) + padding
	size += len(strconv.Itoa(size))
	record := fmt.Sprintf("%d %s=%s\n", size, k, v)

	// Final adjustment if adding size field increased the record size.
	if len(record) != size {
		size = len(record)
		record = fmt.Sprintf("%d %s=%s\n", size, k, v)
	}
	return record
}

// Write writes to the current entry in the tar archive.
// Write returns the error ErrWriteTooLong if more than
// hdr.Size bytes are written after WriteHeader.
func (tw *Writer) Write(b []byte) (n int, err error) {
	if tw.closed {
		err = ErrWriteAfterClose
		return
	}
	overwrite := false
	if int64(len(b)) > tw.nb {
		b = b[0:tw.nb]
		overwrite = true
	}
	n, err = tw.w.Write(b)
	tw.nb -= int64(n)
	if err == nil && overwrite {
		err = ErrWriteTooLong
		return
	}
	tw.err = err
	return
}

// Close closes the tar archive, flushing any unwritten
// data to the underlying writer.
func (tw *Writer) Close() error {
	if tw.err != nil || tw.closed {
		return tw.err
	}
	tw.Flush()
	tw.closed = true
	if tw.err != nil {
		return tw.err
	}

	// trailer: two zero blocks
	for i := 0; i < 2; i++ {
		_, tw.err = tw.w.Write(zeroBlock)
		if tw.err != nil {
			break
		}
	}
	return tw.err
}
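To make the PAX record layout produced by formatPAXRecord above concrete, here is a small standalone sketch that reproduces the same length-prefix calculation for a sample key/value pair. It is illustrative only and not part of the vendored writer; the helper name paxRecord is hypothetical.

package main

import (
	"fmt"
	"strconv"
)

// paxRecord mirrors formatPAXRecord above: "%d %s=%s\n", where the leading
// number counts the whole record, including the number itself.
func paxRecord(k, v string) string {
	size := len(k) + len(v) + 3 // ' ', '=', '\n'
	size += len(strconv.Itoa(size))
	record := fmt.Sprintf("%d %s=%s\n", size, k, v)
	if len(record) != size { // adding the size field changed the length
		size = len(record)
		record = fmt.Sprintf("%d %s=%s\n", size, k, v)
	}
	return record
}

func main() {
	fmt.Print(paxRecord("mtime", "1614200000.25")) // prints "23 mtime=1614200000.25"
}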
vendor/github.com/Microsoft/go-winio/backuptar/strconv.go (generated, vendored, new file, 68 lines)
@@ -0,0 +1,68 @@
package backuptar

import (
	"archive/tar"
	"fmt"
	"strconv"
	"strings"
	"time"
)

// Functions copied from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go
// as we need to manage the LIBARCHIVE.creationtime PAXRecord manually.
// Idea taken from containerd which did the same thing.

// parsePAXTime takes a string of the form %d.%d as described in the PAX
// specification. Note that this implementation allows for negative timestamps,
// which is allowed for by the PAX specification, but not always portable.
func parsePAXTime(s string) (time.Time, error) {
	const maxNanoSecondDigits = 9

	// Split string into seconds and sub-seconds parts.
	ss, sn := s, ""
	if pos := strings.IndexByte(s, '.'); pos >= 0 {
		ss, sn = s[:pos], s[pos+1:]
	}

	// Parse the seconds.
	secs, err := strconv.ParseInt(ss, 10, 64)
	if err != nil {
		return time.Time{}, tar.ErrHeader
	}
	if len(sn) == 0 {
		return time.Unix(secs, 0), nil // No sub-second values
	}

	// Parse the nanoseconds.
	if strings.Trim(sn, "0123456789") != "" {
		return time.Time{}, tar.ErrHeader
	}
	if len(sn) < maxNanoSecondDigits {
		sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
	} else {
		sn = sn[:maxNanoSecondDigits] // Right truncate
	}
	nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
	if len(ss) > 0 && ss[0] == '-' {
		return time.Unix(secs, -1*nsecs), nil // Negative correction
	}
	return time.Unix(secs, nsecs), nil
}

// formatPAXTime converts ts into a time of the form %d.%d as described in the
// PAX specification. This function is capable of negative timestamps.
func formatPAXTime(ts time.Time) (s string) {
	secs, nsecs := ts.Unix(), ts.Nanosecond()
	if nsecs == 0 {
		return strconv.FormatInt(secs, 10)
	}

	// If seconds is negative, then perform correction.
	sign := ""
	if secs < 0 {
		sign = "-"             // Remember sign
		secs = -(secs + 1)     // Add a second to secs
		nsecs = -(nsecs - 1e9) // Take that second away from nsecs
	}
	return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
}
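A rough illustration of the %d.%d form handled above: the snippet below formats and re-parses a timestamp using only the standard library. It mirrors the intent of formatPAXTime/parsePAXTime (which are unexported) but is a sketch, not the vendored code, and handles positive timestamps only.

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

func main() {
	t := time.Unix(1514764800, 25000000) // 2018-01-01T00:00:00.025Z

	// Format: seconds, then nanoseconds with trailing zeros trimmed.
	s := strconv.FormatInt(t.Unix(), 10)
	if ns := t.Nanosecond(); ns != 0 {
		s = strings.TrimRight(fmt.Sprintf("%s.%09d", s, ns), "0")
	}
	fmt.Println(s) // 1514764800.025

	// Parse it back.
	parts := strings.SplitN(s, ".", 2)
	secs, _ := strconv.ParseInt(parts[0], 10, 64)
	frac := parts[1] + strings.Repeat("0", 9-len(parts[1])) // right-pad to nanoseconds
	nsecs, _ := strconv.ParseInt(frac, 10, 64)
	fmt.Println(time.Unix(secs, nsecs).UTC()) // 2018-01-01 00:00:00.025 +0000 UTC
}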
vendor/github.com/Microsoft/go-winio/backuptar/tar.go (generated, vendored, 67 lines changed)
@@ -3,6 +3,7 @@
|
||||
package backuptar
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -15,7 +16,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
"github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -32,11 +33,13 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
hdrFileAttributes = "fileattr"
|
||||
hdrSecurityDescriptor = "sd"
|
||||
hdrRawSecurityDescriptor = "rawsd"
|
||||
hdrMountPoint = "mountpoint"
|
||||
hdrEaPrefix = "xattr."
|
||||
hdrFileAttributes = "MSWINDOWS.fileattr"
|
||||
hdrSecurityDescriptor = "MSWINDOWS.sd"
|
||||
hdrRawSecurityDescriptor = "MSWINDOWS.rawsd"
|
||||
hdrMountPoint = "MSWINDOWS.mountpoint"
|
||||
hdrEaPrefix = "MSWINDOWS.xattr."
|
||||
|
||||
hdrCreationTime = "LIBARCHIVE.creationtime"
|
||||
)
|
||||
|
||||
func writeZeroes(w io.Writer, count int64) error {
|
||||
@@ -86,16 +89,17 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
|
||||
// BasicInfoHeader creates a tar header from basic file information.
|
||||
func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
|
||||
hdr := &tar.Header{
|
||||
Name: filepath.ToSlash(name),
|
||||
Size: size,
|
||||
Typeflag: tar.TypeReg,
|
||||
ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
|
||||
ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
|
||||
AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
|
||||
CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()),
|
||||
Winheaders: make(map[string]string),
|
||||
Format: tar.FormatPAX,
|
||||
Name: filepath.ToSlash(name),
|
||||
Size: size,
|
||||
Typeflag: tar.TypeReg,
|
||||
ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
|
||||
ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
|
||||
AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
|
||||
PAXRecords: make(map[string]string),
|
||||
}
|
||||
hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
|
||||
hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
|
||||
hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds()))
|
||||
|
||||
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
|
||||
hdr.Mode |= c_ISDIR
|
||||
@@ -155,7 +159,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
|
||||
hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
|
||||
|
||||
case winio.BackupReparseData:
|
||||
hdr.Mode |= c_ISLNK
|
||||
@@ -166,7 +170,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
|
||||
return err
|
||||
}
|
||||
if rp.IsMountPoint {
|
||||
hdr.Winheaders[hdrMountPoint] = "1"
|
||||
hdr.PAXRecords[hdrMountPoint] = "1"
|
||||
}
|
||||
hdr.Linkname = rp.Target
|
||||
|
||||
@@ -183,7 +187,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
|
||||
// Use base64 encoding for the binary value. Note that there
|
||||
// is no way to encode the EA's flags, since their use doesn't
|
||||
// make any sense for persisted EAs.
|
||||
hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
|
||||
hdr.PAXRecords[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
|
||||
}
|
||||
|
||||
case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
|
||||
@@ -254,6 +258,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
|
||||
}
|
||||
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
|
||||
hdr = &tar.Header{
|
||||
Format: hdr.Format,
|
||||
Name: name + altName,
|
||||
Mode: hdr.Mode,
|
||||
Typeflag: tar.TypeReg,
|
||||
@@ -293,12 +298,13 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
|
||||
size = hdr.Size
|
||||
}
|
||||
fileInfo = &winio.FileBasicInfo{
|
||||
LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
|
||||
LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
|
||||
ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
|
||||
CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()),
|
||||
LastAccessTime: windows.NsecToFiletime(hdr.AccessTime.UnixNano()),
|
||||
LastWriteTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
|
||||
ChangeTime: windows.NsecToFiletime(hdr.ChangeTime.UnixNano()),
|
||||
// Default to ModTime, we'll pull hdrCreationTime below if present
|
||||
CreationTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
|
||||
}
|
||||
if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
|
||||
if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok {
|
||||
attr, err := strconv.ParseUint(attrStr, 10, 32)
|
||||
if err != nil {
|
||||
return "", 0, nil, err
|
||||
@@ -309,6 +315,13 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
|
||||
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
|
||||
}
|
||||
}
|
||||
if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok {
|
||||
creationTime, err := parsePAXTime(creationTimeStr)
|
||||
if err != nil {
|
||||
return "", 0, nil, err
|
||||
}
|
||||
fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -321,13 +334,13 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
|
||||
var err error
|
||||
// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
|
||||
// by this library will have raw binary for the security descriptor.
|
||||
if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
|
||||
if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
|
||||
sd, err = winio.SddlToSecurityDescriptor(sddl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok {
|
||||
if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
|
||||
sd, err = base64.StdEncoding.DecodeString(sdraw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -348,7 +361,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
|
||||
}
|
||||
}
|
||||
var eas []winio.ExtendedAttribute
|
||||
for k, v := range hdr.Winheaders {
|
||||
for k, v := range hdr.PAXRecords {
|
||||
if !strings.HasPrefix(k, hdrEaPrefix) {
|
||||
continue
|
||||
}
|
||||
@@ -380,7 +393,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
|
||||
}
|
||||
}
|
||||
if hdr.Typeflag == tar.TypeSymlink {
|
||||
_, isMountPoint := hdr.Winheaders[hdrMountPoint]
|
||||
_, isMountPoint := hdr.PAXRecords[hdrMountPoint]
|
||||
rp := winio.ReparsePoint{
|
||||
Target: filepath.FromSlash(hdr.Linkname),
|
||||
IsMountPoint: isMountPoint,
|
||||
|
||||
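For orientation, this is roughly how the exported backuptar helper shown above is used to build a PAX tar header from Windows file information. The file path is an illustrative assumption, and error handling is abbreviated.

// +build windows

package main

import (
	"archive/tar"
	"fmt"
	"os"

	winio "github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/backuptar"
)

func main() {
	f, err := os.Open(`C:\Windows\notepad.exe`) // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	fi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		panic(err)
	}
	st, _ := f.Stat()

	// Build a PAX tar header carrying the Windows timestamps and attributes.
	hdr := backuptar.BasicInfoHeader("notepad.exe", st.Size(), fi)
	fmt.Println(hdr.Format == tar.FormatPAX, hdr.PAXRecords["MSWINDOWS.fileattr"])
}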
vendor/github.com/Microsoft/go-winio/fileinfo.go (generated, vendored, 36 lines changed)
@@ -5,21 +5,14 @@ package winio
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
|
||||
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
|
||||
|
||||
const (
|
||||
fileBasicInfo = 0
|
||||
fileIDInfo = 0x12
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// FileBasicInfo contains file access time and file attributes information.
|
||||
type FileBasicInfo struct {
|
||||
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
|
||||
CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
|
||||
FileAttributes uint32
|
||||
pad uint32 // padding
|
||||
}
|
||||
@@ -27,7 +20,7 @@ type FileBasicInfo struct {
|
||||
// GetFileBasicInfo retrieves times and attributes for a file.
|
||||
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
|
||||
bi := &FileBasicInfo{}
|
||||
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
@@ -36,13 +29,32 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
|
||||
|
||||
// SetFileBasicInfo sets times and attributes for a file.
|
||||
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
|
||||
if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return nil
|
||||
}
|
||||
|
||||
// FileStandardInfo contains extended information for the file.
|
||||
// FILE_STANDARD_INFO in WinBase.h
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
|
||||
type FileStandardInfo struct {
|
||||
AllocationSize, EndOfFile int64
|
||||
NumberOfLinks uint32
|
||||
DeletePending, Directory bool
|
||||
}
|
||||
|
||||
// GetFileStandardInfo retrieves ended information for the file.
|
||||
func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
|
||||
si := &FileStandardInfo{}
|
||||
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return si, nil
|
||||
}
|
||||
|
||||
// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
|
||||
// unique on a system.
|
||||
type FileIDInfo struct {
|
||||
@@ -53,7 +65,7 @@ type FileIDInfo struct {
|
||||
// GetFileID retrieves the unique (volume, file ID) pair for a file.
|
||||
func GetFileID(f *os.File) (*FileIDInfo, error) {
|
||||
fileID := &FileIDInfo{}
|
||||
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
|
||||
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
|
||||
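A minimal sketch of the newly added GetFileStandardInfo helper shown above (Windows only; the path is an assumption):

// +build windows

package main

import (
	"fmt"
	"os"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	f, err := os.Open(`C:\Windows\notepad.exe`) // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// FILE_STANDARD_INFO: allocation size, end-of-file, link count, flags.
	si, err := winio.GetFileStandardInfo(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("size=%d links=%d dir=%v\n", si.EndOfFile, si.NumberOfLinks, si.Directory)
}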
vendor/github.com/Microsoft/go-winio/go.mod (generated, vendored, 6 lines changed)
@@ -3,7 +3,7 @@ module github.com/Microsoft/go-winio
 go 1.12

 require (
-	github.com/pkg/errors v0.8.1
-	github.com/sirupsen/logrus v1.4.1
-	golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
+	github.com/pkg/errors v0.9.1
+	github.com/sirupsen/logrus v1.7.0
+	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
 )
vendor/github.com/Microsoft/go-winio/go.sum (generated, vendored, 20 lines changed)
@@ -1,18 +1,14 @@
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
||||
vendor/github.com/Microsoft/go-winio/hvsock.go (generated, vendored, 2 lines changed)
@@ -1,3 +1,5 @@
+// +build windows
+
 package winio

 import (
vendor/github.com/Microsoft/go-winio/pipe.go (generated, vendored, 21 lines changed)
@@ -182,13 +182,14 @@ func (s pipeAddress) String() string {
|
||||
}
|
||||
|
||||
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
|
||||
func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
|
||||
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
|
||||
for {
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return syscall.Handle(0), ctx.Err()
|
||||
default:
|
||||
h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
||||
h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
|
||||
if err == nil {
|
||||
return h, nil
|
||||
}
|
||||
@@ -197,7 +198,7 @@ func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
|
||||
}
|
||||
// Wait 10 msec and try again. This is a rather simplistic
|
||||
// view, as we always try each 10 milliseconds.
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -210,7 +211,7 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
|
||||
if timeout != nil {
|
||||
absTimeout = time.Now().Add(*timeout)
|
||||
} else {
|
||||
absTimeout = time.Now().Add(time.Second * 2)
|
||||
absTimeout = time.Now().Add(2 * time.Second)
|
||||
}
|
||||
ctx, _ := context.WithDeadline(context.Background(), absTimeout)
|
||||
conn, err := DialPipeContext(ctx, path)
|
||||
@@ -223,9 +224,15 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
|
||||
// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
|
||||
// cancellation or timeout.
|
||||
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
|
||||
return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE)
|
||||
}
|
||||
|
||||
// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx`
|
||||
// cancellation or timeout.
|
||||
func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
|
||||
var err error
|
||||
var h syscall.Handle
|
||||
h, err = tryDialPipe(ctx, &path)
|
||||
h, err = tryDialPipe(ctx, &path, access)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -422,10 +429,10 @@ type PipeConfig struct {
|
||||
// when the pipe is in message mode.
|
||||
MessageMode bool
|
||||
|
||||
// InputBufferSize specifies the size the input buffer, in bytes.
|
||||
// InputBufferSize specifies the size of the input buffer, in bytes.
|
||||
InputBufferSize int32
|
||||
|
||||
// OutputBufferSize specifies the size the input buffer, in bytes.
|
||||
// OutputBufferSize specifies the size of the output buffer, in bytes.
|
||||
OutputBufferSize int32
|
||||
}
|
||||
|
||||
|
||||
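Sketch of dialing a named pipe read-only with the DialPipeAccess variant added above. The pipe name is an assumption; DialPipeContext remains the read/write default.

// +build windows

package main

import (
	"context"
	"fmt"
	"syscall"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// GENERIC_READ only, unlike DialPipeContext which asks for read+write.
	conn, err := winio.DialPipeAccess(ctx, `\\.\pipe\example`, syscall.GENERIC_READ)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}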
vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go (generated, vendored, 2 lines changed)
@@ -1,3 +1,5 @@
+// +build windows
+
 // Package guid provides a GUID type. The backing structure for a GUID is
 // identical to that used by the golang.org/x/sys/windows GUID type.
 // There are two main binary encodings used for a GUID, the big-endian encoding,
vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go (generated, vendored, new file, 161 lines)
@@ -0,0 +1,161 @@
|
||||
// +build windows
|
||||
|
||||
package security
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type (
|
||||
accessMask uint32
|
||||
accessMode uint32
|
||||
desiredAccess uint32
|
||||
inheritMode uint32
|
||||
objectType uint32
|
||||
shareMode uint32
|
||||
securityInformation uint32
|
||||
trusteeForm uint32
|
||||
trusteeType uint32
|
||||
|
||||
explicitAccess struct {
|
||||
accessPermissions accessMask
|
||||
accessMode accessMode
|
||||
inheritance inheritMode
|
||||
trustee trustee
|
||||
}
|
||||
|
||||
trustee struct {
|
||||
multipleTrustee *trustee
|
||||
multipleTrusteeOperation int32
|
||||
trusteeForm trusteeForm
|
||||
trusteeType trusteeType
|
||||
name uintptr
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
accessMaskDesiredPermission accessMask = 1 << 31 // GENERIC_READ
|
||||
|
||||
accessModeGrant accessMode = 1
|
||||
|
||||
desiredAccessReadControl desiredAccess = 0x20000
|
||||
desiredAccessWriteDac desiredAccess = 0x40000
|
||||
|
||||
gvmga = "GrantVmGroupAccess:"
|
||||
|
||||
inheritModeNoInheritance inheritMode = 0x0
|
||||
inheritModeSubContainersAndObjectsInherit inheritMode = 0x3
|
||||
|
||||
objectTypeFileObject objectType = 0x1
|
||||
|
||||
securityInformationDACL securityInformation = 0x4
|
||||
|
||||
shareModeRead shareMode = 0x1
|
||||
shareModeWrite shareMode = 0x2
|
||||
|
||||
sidVmGroup = "S-1-5-83-0"
|
||||
|
||||
trusteeFormIsSid trusteeForm = 0
|
||||
|
||||
trusteeTypeWellKnownGroup trusteeType = 5
|
||||
)
|
||||
|
||||
// GrantVMGroupAccess sets the DACL for a specified file or directory to
|
||||
// include Grant ACE entries for the VM Group SID. This is a golang re-
|
||||
// implementation of the same function in vmcompute, just not exported in
|
||||
// RS5. Which kind of sucks. Sucks a lot :/
|
||||
func GrantVmGroupAccess(name string) error {
|
||||
// Stat (to determine if `name` is a directory).
|
||||
s, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "%s os.Stat %s", gvmga, name)
|
||||
}
|
||||
|
||||
// Get a handle to the file/directory. Must defer Close on success.
|
||||
fd, err := createFile(name, s.IsDir())
|
||||
if err != nil {
|
||||
return err // Already wrapped
|
||||
}
|
||||
defer syscall.CloseHandle(fd)
|
||||
|
||||
// Get the current DACL and Security Descriptor. Must defer LocalFree on success.
|
||||
ot := objectTypeFileObject
|
||||
si := securityInformationDACL
|
||||
sd := uintptr(0)
|
||||
origDACL := uintptr(0)
|
||||
if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
|
||||
return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name)
|
||||
}
|
||||
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))
|
||||
|
||||
// Generate a new DACL which is the current DACL with the required ACEs added.
|
||||
// Must defer LocalFree on success.
|
||||
newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), origDACL)
|
||||
if err != nil {
|
||||
return err // Already wrapped
|
||||
}
|
||||
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL)))
|
||||
|
||||
// And finally use SetSecurityInfo to apply the updated DACL.
|
||||
if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
|
||||
return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createFile is a helper function to call [Nt]CreateFile to get a handle to
|
||||
// the file or directory.
|
||||
func createFile(name string, isDir bool) (syscall.Handle, error) {
|
||||
namep := syscall.StringToUTF16(name)
|
||||
da := uint32(desiredAccessReadControl | desiredAccessWriteDac)
|
||||
sm := uint32(shareModeRead | shareModeWrite)
|
||||
fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL)
|
||||
if isDir {
|
||||
fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS)
|
||||
}
|
||||
fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name)
|
||||
}
|
||||
return fd, nil
|
||||
}
|
||||
|
||||
// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added.
|
||||
// The caller is responsible for LocalFree of the returned DACL on success.
|
||||
func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) {
|
||||
// Generate pointers to the SIDs based on the string SIDs
|
||||
sid, err := syscall.StringToSid(sidVmGroup)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup)
|
||||
}
|
||||
|
||||
inheritance := inheritModeNoInheritance
|
||||
if isDir {
|
||||
inheritance = inheritModeSubContainersAndObjectsInherit
|
||||
}
|
||||
|
||||
eaArray := []explicitAccess{
|
||||
explicitAccess{
|
||||
accessPermissions: accessMaskDesiredPermission,
|
||||
accessMode: accessModeGrant,
|
||||
inheritance: inheritance,
|
||||
trustee: trustee{
|
||||
trusteeForm: trusteeFormIsSid,
|
||||
trusteeType: trusteeTypeWellKnownGroup,
|
||||
name: uintptr(unsafe.Pointer(sid)),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
modifiedDACL := uintptr(0)
|
||||
if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil {
|
||||
return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name)
|
||||
}
|
||||
|
||||
return modifiedDACL, nil
|
||||
}
|
||||
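A typical call site for the function above, e.g. granting the VM group access to a layer VHD before handing it to a utility VM; the path is illustrative.

// +build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio/pkg/security"
)

func main() {
	// Adds a GENERIC_READ grant ACE for the well-known VM group SID (S-1-5-83-0)
	// to the file's DACL; for directories the ACE inherits into children.
	if err := security.GrantVmGroupAccess(`C:\layers\base.vhdx`); err != nil { // illustrative path
		log.Fatal(err)
	}
}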
vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go (generated, vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
package security

//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go

//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) [failretval!=0] = advapi32.GetSecurityInfo
//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) [failretval!=0] = advapi32.SetSecurityInfo
//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) [failretval!=0] = advapi32.SetEntriesInAclW
vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go (generated, vendored, new file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
// Code generated by 'go generate'; DO NOT EDIT.
|
||||
|
||||
package security
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
||||
errERROR_EINVAL error = syscall.EINVAL
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e syscall.Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return errERROR_EINVAL
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values see on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
|
||||
|
||||
procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo")
|
||||
procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW")
|
||||
procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo")
|
||||
)
|
||||
|
||||
func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0)
|
||||
if r1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0)
|
||||
if r1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0)
|
||||
if r1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
vendor/github.com/Microsoft/go-winio/privilege.go (generated, vendored, 5 lines changed)
@@ -28,8 +28,9 @@ const (
 	ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300

-	SeBackupPrivilege  = "SeBackupPrivilege"
-	SeRestorePrivilege = "SeRestorePrivilege"
+	SeBackupPrivilege   = "SeBackupPrivilege"
+	SeRestorePrivilege  = "SeRestorePrivilege"
+	SeSecurityPrivilege = "SeSecurityPrivilege"
 )

 const (
vendor/github.com/Microsoft/go-winio/syscall.go (generated, vendored, 2 lines changed)
@@ -1,3 +1,3 @@
 package winio

-//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
+//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
vendor/github.com/Microsoft/go-winio/vhd/vhd.go (generated, vendored, 352 lines changed)
@@ -2,150 +2,322 @@
|
||||
|
||||
package vhd
|
||||
|
||||
import "syscall"
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
|
||||
//go:generate go run mksyscall_windows.go -output zvhd.go vhd.go
|
||||
"github.com/Microsoft/go-winio/pkg/guid"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//sys createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.CreateVirtualDisk
|
||||
//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.OpenVirtualDisk
|
||||
//sys detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = VirtDisk.DetachVirtualDisk
|
||||
//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go
|
||||
|
||||
type virtualStorageType struct {
|
||||
DeviceID uint32
|
||||
VendorID [16]byte
|
||||
}
|
||||
//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.CreateVirtualDisk
|
||||
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk
|
||||
//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk
|
||||
//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = virtdisk.DetachVirtualDisk
|
||||
//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) [failretval != 0] = virtdisk.GetVirtualDiskPhysicalPath
|
||||
|
||||
type (
|
||||
createVirtualDiskFlag uint32
|
||||
VirtualDiskAccessMask uint32
|
||||
CreateVirtualDiskFlag uint32
|
||||
VirtualDiskFlag uint32
|
||||
AttachVirtualDiskFlag uint32
|
||||
DetachVirtualDiskFlag uint32
|
||||
VirtualDiskAccessMask uint32
|
||||
)
|
||||
|
||||
const (
|
||||
// Flags for creating a VHD (not exported)
|
||||
createVirtualDiskFlagNone createVirtualDiskFlag = 0
|
||||
createVirtualDiskFlagFullPhysicalAllocation createVirtualDiskFlag = 1
|
||||
createVirtualDiskFlagPreventWritesToSourceDisk createVirtualDiskFlag = 2
|
||||
createVirtualDiskFlagDoNotCopyMetadataFromParent createVirtualDiskFlag = 4
|
||||
type VirtualStorageType struct {
|
||||
DeviceID uint32
|
||||
VendorID guid.GUID
|
||||
}
|
||||
|
||||
// Access Mask for opening a VHD
|
||||
VirtualDiskAccessNone VirtualDiskAccessMask = 0
|
||||
VirtualDiskAccessAttachRO VirtualDiskAccessMask = 65536
|
||||
VirtualDiskAccessAttachRW VirtualDiskAccessMask = 131072
|
||||
VirtualDiskAccessDetach VirtualDiskAccessMask = 262144
|
||||
VirtualDiskAccessGetInfo VirtualDiskAccessMask = 524288
|
||||
VirtualDiskAccessCreate VirtualDiskAccessMask = 1048576
|
||||
VirtualDiskAccessMetaOps VirtualDiskAccessMask = 2097152
|
||||
VirtualDiskAccessRead VirtualDiskAccessMask = 851968
|
||||
VirtualDiskAccessAll VirtualDiskAccessMask = 4128768
|
||||
VirtualDiskAccessWritable VirtualDiskAccessMask = 3276800
|
||||
|
||||
// Flags for opening a VHD
|
||||
OpenVirtualDiskFlagNone VirtualDiskFlag = 0
|
||||
OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x1
|
||||
OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x2
|
||||
OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x4
|
||||
OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x8
|
||||
OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x10
|
||||
OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x20
|
||||
OpenVirtualDiskFlagVhdSetFileOnly VirtualDiskFlag = 0x40
|
||||
OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x80
|
||||
OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x100
|
||||
)
|
||||
|
||||
type createVersion2 struct {
|
||||
UniqueID [16]byte // GUID
|
||||
type CreateVersion2 struct {
|
||||
UniqueID guid.GUID
|
||||
MaximumSize uint64
|
||||
BlockSizeInBytes uint32
|
||||
SectorSizeInBytes uint32
|
||||
PhysicalSectorSizeInByte uint32
|
||||
ParentPath *uint16 // string
|
||||
SourcePath *uint16 // string
|
||||
OpenFlags uint32
|
||||
ParentVirtualStorageType virtualStorageType
|
||||
SourceVirtualStorageType virtualStorageType
|
||||
ResiliencyGUID [16]byte // GUID
|
||||
ParentVirtualStorageType VirtualStorageType
|
||||
SourceVirtualStorageType VirtualStorageType
|
||||
ResiliencyGUID guid.GUID
|
||||
}
|
||||
|
||||
type createVirtualDiskParameters struct {
|
||||
type CreateVirtualDiskParameters struct {
|
||||
Version uint32 // Must always be set to 2
|
||||
Version2 createVersion2
|
||||
Version2 CreateVersion2
|
||||
}
|
||||
|
||||
type openVersion2 struct {
|
||||
GetInfoOnly int32 // bool but 4-byte aligned
|
||||
ReadOnly int32 // bool but 4-byte aligned
|
||||
ResiliencyGUID [16]byte // GUID
|
||||
type OpenVersion2 struct {
|
||||
GetInfoOnly bool
|
||||
ReadOnly bool
|
||||
ResiliencyGUID guid.GUID
|
||||
}
|
||||
|
||||
type openVirtualDiskParameters struct {
|
||||
type OpenVirtualDiskParameters struct {
|
||||
Version uint32 // Must always be set to 2
|
||||
Version2 openVersion2
|
||||
Version2 OpenVersion2
|
||||
}
|
||||
|
||||
// CreateVhdx will create a simple vhdx file at the given path using default values.
|
||||
type AttachVersion2 struct {
|
||||
RestrictedOffset uint64
|
||||
RestrictedLength uint64
|
||||
}
|
||||
|
||||
type AttachVirtualDiskParameters struct {
|
||||
Version uint32 // Must always be set to 2
|
||||
Version2 AttachVersion2
|
||||
}
|
||||
|
||||
const (
|
||||
VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3
|
||||
|
||||
// Access Mask for opening a VHD
|
||||
VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000
|
||||
VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000
|
||||
VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000
|
||||
VirtualDiskAccessDetach VirtualDiskAccessMask = 0x00040000
|
||||
VirtualDiskAccessGetInfo VirtualDiskAccessMask = 0x00080000
|
||||
VirtualDiskAccessCreate VirtualDiskAccessMask = 0x00100000
|
||||
VirtualDiskAccessMetaOps VirtualDiskAccessMask = 0x00200000
|
||||
VirtualDiskAccessRead VirtualDiskAccessMask = 0x000d0000
|
||||
VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000
|
||||
VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000
|
||||
|
||||
// Flags for creating a VHD
|
||||
CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0
|
||||
CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1
|
||||
CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2
|
||||
CreateVirtualDiskFlagDoNotCopyMetadataFromParent CreateVirtualDiskFlag = 0x4
|
||||
CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8
|
||||
CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10
|
||||
CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20
|
||||
CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40
|
||||
CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80
|
||||
CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100
|
||||
CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200
|
||||
|
||||
// Flags for opening a VHD
|
||||
OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000
|
||||
OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001
|
||||
OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002
|
||||
OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x00000004
|
||||
OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x00000008
|
||||
OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x00000010
|
||||
OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x00000020
|
||||
OpenVirtualDiskFlagVhdsetFileOnly VirtualDiskFlag = 0x00000040
|
||||
OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x00000080
|
||||
OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100
|
||||
OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200
|
||||
|
||||
// Flags for attaching a VHD
|
||||
AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000
|
||||
AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001
|
||||
AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002
|
||||
AttachVirtualDiskFlagPermanentLifetime AttachVirtualDiskFlag = 0x00000004
|
||||
AttachVirtualDiskFlagNoLocalHost AttachVirtualDiskFlag = 0x00000008
|
||||
AttachVirtualDiskFlagNoSecurityDescriptor AttachVirtualDiskFlag = 0x00000010
|
||||
AttachVirtualDiskFlagBypassDefaultEncryptionPolicy AttachVirtualDiskFlag = 0x00000020
|
||||
AttachVirtualDiskFlagNonPnp AttachVirtualDiskFlag = 0x00000040
|
||||
AttachVirtualDiskFlagRestrictedRange AttachVirtualDiskFlag = 0x00000080
|
||||
AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100
|
||||
AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200
|
||||
|
||||
// Flags for detaching a VHD
|
||||
DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0
|
||||
)
|
||||
|
||||
// CreateVhdx is a helper function to create a simple vhdx file at the given path using
|
||||
// default values.
|
||||
func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
|
||||
var (
|
||||
defaultType virtualStorageType
|
||||
handle syscall.Handle
|
||||
)
|
||||
|
||||
parameters := createVirtualDiskParameters{
|
||||
params := CreateVirtualDiskParameters{
|
||||
Version: 2,
|
||||
Version2: createVersion2{
|
||||
Version2: CreateVersion2{
|
||||
MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024,
|
||||
BlockSizeInBytes: blockSizeInMb * 1024 * 1024,
|
||||
},
|
||||
}
|
||||
|
||||
if err := createVirtualDisk(
|
||||
&defaultType,
|
||||
path,
|
||||
uint32(VirtualDiskAccessNone),
|
||||
nil,
|
||||
uint32(createVirtualDiskFlagNone),
|
||||
0,
|
||||
¶meters,
|
||||
nil,
|
||||
&handle); err != nil {
|
||||
handle, err := CreateVirtualDisk(path, VirtualDiskAccessNone, CreateVirtualDiskFlagNone, ¶ms)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := syscall.CloseHandle(handle); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DetachVhd detaches a mounted container layer vhd found at `path`.
|
||||
// DetachVirtualDisk detaches a virtual hard disk by handle.
|
||||
func DetachVirtualDisk(handle syscall.Handle) (err error) {
|
||||
if err := detachVirtualDisk(handle, 0, 0); err != nil {
|
||||
return errors.Wrap(err, "failed to detach virtual disk")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DetachVhd detaches a vhd found at `path`.
|
||||
func DetachVhd(path string) error {
|
||||
handle, err := OpenVirtualDisk(
|
||||
path,
|
||||
VirtualDiskAccessNone,
|
||||
OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator)
|
||||
|
||||
OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer syscall.CloseHandle(handle)
|
||||
return detachVirtualDisk(handle, 0, 0)
|
||||
return DetachVirtualDisk(handle)
|
||||
}
|
||||
|
||||
// AttachVirtualDisk attaches a virtual hard disk for use.
|
||||
func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) {
|
||||
// Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5.
|
||||
if err := attachVirtualDisk(
|
||||
handle,
|
||||
nil,
|
||||
uint32(attachVirtualDiskFlag),
|
||||
0,
|
||||
parameters,
|
||||
nil,
|
||||
); err != nil {
|
||||
return errors.Wrap(err, "failed to attach virtual disk")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2
|
||||
// of the ATTACH_VIRTUAL_DISK_PARAMETERS.
|
||||
func AttachVhd(path string) (err error) {
|
||||
handle, err := OpenVirtualDisk(
|
||||
path,
|
||||
VirtualDiskAccessNone,
|
||||
OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer syscall.CloseHandle(handle)
|
||||
params := AttachVirtualDiskParameters{Version: 2}
|
||||
if err := AttachVirtualDisk(
|
||||
handle,
|
||||
AttachVirtualDiskFlagNone,
|
||||
¶ms,
|
||||
); err != nil {
|
||||
return errors.Wrap(err, "failed to attach virtual disk")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags.
|
||||
func OpenVirtualDisk(path string, accessMask VirtualDiskAccessMask, flag VirtualDiskFlag) (syscall.Handle, error) {
|
||||
var (
|
||||
defaultType virtualStorageType
|
||||
handle syscall.Handle
|
||||
)
|
||||
parameters := openVirtualDiskParameters{Version: 2}
|
||||
if err := openVirtualDisk(
|
||||
&defaultType,
|
||||
path,
|
||||
uint32(accessMask),
|
||||
uint32(flag),
|
||||
func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) {
|
||||
parameters := OpenVirtualDiskParameters{Version: 2}
|
||||
handle, err := OpenVirtualDiskWithParameters(
|
||||
vhdPath,
|
||||
virtualDiskAccessMask,
|
||||
openVirtualDiskFlags,
|
||||
¶meters,
|
||||
&handle); err != nil {
|
||||
)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return handle, nil
|
||||
}
|
||||
|
||||
// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters.
|
||||
func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) {
|
||||
var (
|
||||
handle syscall.Handle
|
||||
defaultType VirtualStorageType
|
||||
)
|
||||
if parameters.Version != 2 {
|
||||
return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version)
|
||||
}
|
||||
if err := openVirtualDisk(
|
||||
&defaultType,
|
||||
vhdPath,
|
||||
uint32(virtualDiskAccessMask),
|
||||
uint32(openVirtualDiskFlags),
|
||||
parameters,
|
||||
&handle,
|
||||
); err != nil {
|
||||
return 0, errors.Wrap(err, "failed to open virtual disk")
|
||||
}
|
||||
return handle, nil
|
||||
}
|
||||
|
||||
// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk.
|
||||
func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) {
|
||||
var (
|
||||
handle syscall.Handle
|
||||
defaultType VirtualStorageType
|
||||
)
|
||||
if parameters.Version != 2 {
|
||||
return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version)
|
||||
}
|
||||
|
||||
if err := createVirtualDisk(
|
||||
&defaultType,
|
||||
path,
|
||||
uint32(virtualDiskAccessMask),
|
||||
nil,
|
||||
uint32(createVirtualDiskFlags),
|
||||
0,
|
||||
parameters,
|
||||
nil,
|
||||
&handle,
|
||||
); err != nil {
|
||||
return handle, errors.Wrap(err, "failed to create virtual disk")
|
||||
}
|
||||
return handle, nil
|
||||
}
|
||||
|
||||
// GetVirtualDiskPhysicalPath takes a handle to a virtual hard disk and returns the physical
|
||||
// path of the disk on the machine. This path is in the form \\.\PhysicalDriveX where X is an integer
|
||||
// that represents the particular enumeration of the physical disk on the caller's system.
|
||||
func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) {
|
||||
var (
|
||||
diskPathSizeInBytes uint32 = 256 * 2 // max path length 256 wide chars
|
||||
diskPhysicalPathBuf [256]uint16
|
||||
)
|
||||
if err := getVirtualDiskPhysicalPath(
|
||||
handle,
|
||||
&diskPathSizeInBytes,
|
||||
&diskPhysicalPathBuf[0],
|
||||
); err != nil {
|
||||
return "", errors.Wrap(err, "failed to get disk physical path")
|
||||
}
|
||||
return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil
|
||||
}
|
||||
|
||||
// CreateDiffVhd is a helper function to create a differencing virtual disk.
|
||||
func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error {
|
||||
// Setting `ParentPath` is how to signal to create a differencing disk.
|
||||
createParams := &CreateVirtualDiskParameters{
|
||||
Version: 2,
|
||||
Version2: CreateVersion2{
|
||||
ParentPath: windows.StringToUTF16Ptr(baseVhdPath),
|
||||
BlockSizeInBytes: blockSizeInMB * 1024 * 1024,
|
||||
OpenFlags: uint32(OpenVirtualDiskFlagCachedIO),
|
||||
},
|
||||
}
|
||||
|
||||
vhdHandle, err := CreateVirtualDisk(
|
||||
diffVhdPath,
|
||||
VirtualDiskAccessNone,
|
||||
CreateVirtualDiskFlagNone,
|
||||
createParams,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create differencing vhd: %s", err)
|
||||
}
|
||||
if err := syscall.CloseHandle(vhdHandle); err != nil {
|
||||
return fmt.Errorf("failed to close differencing vhd handle: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
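Sketch of the exported vhd helpers shown above: create a small VHDX, attach it, then detach it. The path is an assumption, and attaching a disk generally requires appropriate privileges.

// +build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio/vhd"
)

func main() {
	path := `C:\temp\scratch.vhdx` // illustrative path

	// 1 GB maximum size, 1 MB block size.
	if err := vhd.CreateVhdx(path, 1, 1); err != nil {
		log.Fatal(err)
	}
	if err := vhd.AttachVhd(path); err != nil {
		log.Fatal(err)
	}
	if err := vhd.DetachVhd(path); err != nil {
		log.Fatal(err)
	}
}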
vendor/github.com/Microsoft/go-winio/vhd/zvhd.go (generated, vendored, file deleted, 99 lines)
@@ -1,99 +0,0 @@
|
||||
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT

package vhd

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var _ unsafe.Pointer

// Do the interface allocations only once for common
// Errno values.
const (
	errnoERROR_IO_PENDING = 997
)

var (
	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
	switch e {
	case 0:
		return nil
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	// TODO: add more here, after collecting data on the common
	// error values see on Windows. (perhaps when running
	// all.bat?)
	return e
}

var (
	modVirtDisk = windows.NewLazySystemDLL("VirtDisk.dll")

	procCreateVirtualDisk = modVirtDisk.NewProc("CreateVirtualDisk")
	procOpenVirtualDisk   = modVirtDisk.NewProc("OpenVirtualDisk")
	procDetachVirtualDisk = modVirtDisk.NewProc("DetachVirtualDisk")
)

func createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(path)
	if err != nil {
		return
	}
	return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, flags, providerSpecificFlags, parameters, o, handle)
}

func _createVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
	r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(flags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(handle)))
	if r1 != 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}

func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(path)
	if err != nil {
		return
	}
	return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle)
}

func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
	r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
	if r1 != 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}

func detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) {
	r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(flags), uintptr(providerSpecificFlags))
	if r1 != 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}

106 vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go (generated, vendored, new file)
@@ -0,0 +1,106 @@
// Code generated by 'go generate'; DO NOT EDIT.

package vhd

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var _ unsafe.Pointer

// Do the interface allocations only once for common
// Errno values.
const (
	errnoERROR_IO_PENDING = 997
)

var (
	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
	errERROR_EINVAL     error = syscall.EINVAL
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
	switch e {
	case 0:
		return errERROR_EINVAL
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	// TODO: add more here, after collecting data on the common
	// error values see on Windows. (perhaps when running
	// all.bat?)
	return e
}

var (
	modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll")

	procAttachVirtualDisk          = modvirtdisk.NewProc("AttachVirtualDisk")
	procCreateVirtualDisk          = modvirtdisk.NewProc("CreateVirtualDisk")
	procDetachVirtualDisk          = modvirtdisk.NewProc("DetachVirtualDisk")
	procGetVirtualDiskPhysicalPath = modvirtdisk.NewProc("GetVirtualDiskPhysicalPath")
	procOpenVirtualDisk            = modvirtdisk.NewProc("OpenVirtualDisk")
)

func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) {
	r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
	if r1 != 0 {
		err = errnoErr(e1)
	}
	return
}

func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(path)
	if err != nil {
		return
	}
	return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle)
}

func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) {
	r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle)))
	if r1 != 0 {
		err = errnoErr(e1)
	}
	return
}

func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) {
	r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
	if r1 != 0 {
		err = errnoErr(e1)
	}
	return
}

func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) {
	r1, _, e1 := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
	if r1 != 0 {
		err = errnoErr(e1)
	}
	return
}

func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(path)
	if err != nil {
		return
	}
	return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle)
}

func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) {
	r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
	if r1 != 0 {
		err = errnoErr(e1)
	}
	return
}
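
Editorial note, not part of the diff: the generated file above follows the standard lazy-DLL pattern from golang.org/x/sys/windows, where the DLL and each procedure are resolved on first call rather than at init time. A minimal standalone sketch of that pattern, using GetTickCount64 purely as a convenient zero-argument example (it is not an API from this diff):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

var (
	// The DLL is loaded and the procedure address resolved lazily, on first Call.
	modkernel32          = windows.NewLazySystemDLL("kernel32.dll")
	procGetTickCount64   = modkernel32.NewProc("GetTickCount64")
)

func main() {
	// GetTickCount64 takes no arguments; on 64-bit Windows the result is returned in r1.
	r1, _, _ := procGetTickCount64.Call()
	fmt.Printf("system uptime: %d ms\n", r1)
}
```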

621 vendor/github.com/Microsoft/go-winio/zsyscall_windows.go (generated, vendored)
@@ -19,6 +19,7 @@ const (
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
||||
errERROR_EINVAL error = syscall.EINVAL
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
@@ -26,7 +27,7 @@ var (
|
||||
func errnoErr(e syscall.Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return nil
|
||||
return errERROR_EINVAL
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
@@ -37,243 +38,62 @@ func errnoErr(e syscall.Errno) error {
|
||||
}
|
||||
|
||||
var (
|
||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
||||
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
|
||||
modntdll = windows.NewLazySystemDLL("ntdll.dll")
|
||||
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
|
||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
||||
modntdll = windows.NewLazySystemDLL("ntdll.dll")
|
||||
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
|
||||
|
||||
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
|
||||
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
|
||||
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
|
||||
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
|
||||
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
|
||||
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
|
||||
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
|
||||
procCreateFileW = modkernel32.NewProc("CreateFileW")
|
||||
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
|
||||
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
|
||||
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
|
||||
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
|
||||
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
|
||||
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
|
||||
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
|
||||
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
|
||||
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
|
||||
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
|
||||
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
|
||||
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
|
||||
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
|
||||
procLocalFree = modkernel32.NewProc("LocalFree")
|
||||
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
|
||||
procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
|
||||
procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
|
||||
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
|
||||
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
|
||||
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
|
||||
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
|
||||
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
|
||||
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
|
||||
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
|
||||
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
|
||||
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
|
||||
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
|
||||
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
|
||||
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
|
||||
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
|
||||
procBackupRead = modkernel32.NewProc("BackupRead")
|
||||
procBackupWrite = modkernel32.NewProc("BackupWrite")
|
||||
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
|
||||
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
|
||||
procCreateFileW = modkernel32.NewProc("CreateFileW")
|
||||
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
|
||||
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
|
||||
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
|
||||
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
|
||||
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
|
||||
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
|
||||
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
|
||||
procLocalFree = modkernel32.NewProc("LocalFree")
|
||||
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
|
||||
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
|
||||
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
|
||||
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
|
||||
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
|
||||
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
|
||||
procbind = modws2_32.NewProc("bind")
|
||||
)
|
||||
|
||||
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
|
||||
newport = syscall.Handle(r0)
|
||||
if newport == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
|
||||
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
|
||||
var _p0 uint32
|
||||
if wait {
|
||||
if releaseAll {
|
||||
_p0 = 1
|
||||
} else {
|
||||
_p0 = 0
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
|
||||
r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
|
||||
success = r0 != 0
|
||||
if true {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
|
||||
}
|
||||
|
||||
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
|
||||
handle = syscall.Handle(r0)
|
||||
if handle == syscall.InvalidHandle {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
|
||||
}
|
||||
|
||||
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
|
||||
handle = syscall.Handle(r0)
|
||||
if handle == syscall.InvalidHandle {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
|
||||
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
|
||||
ptr = uintptr(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
|
||||
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
|
||||
if r0 != 0 {
|
||||
winerr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(accountName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
|
||||
}
|
||||
|
||||
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -281,11 +101,7 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS
|
||||
func convertSidToStringSid(sid *byte, str **uint16) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -302,126 +118,73 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui
|
||||
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func localFree(mem uintptr) {
|
||||
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
|
||||
return
|
||||
}
|
||||
|
||||
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
|
||||
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
|
||||
len = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
|
||||
var _p0 uint32
|
||||
if releaseAll {
|
||||
_p0 = 1
|
||||
} else {
|
||||
_p0 = 0
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
|
||||
success = r0 != 0
|
||||
if true {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func impersonateSelf(level uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func revertToSelf() (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
|
||||
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(accountName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
|
||||
}
|
||||
|
||||
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
|
||||
var _p0 uint32
|
||||
if openAsSelf {
|
||||
_p0 = 1
|
||||
} else {
|
||||
_p0 = 0
|
||||
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(systemName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
|
||||
return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
|
||||
}
|
||||
|
||||
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getCurrentThread() (h syscall.Handle) {
|
||||
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
|
||||
h = syscall.Handle(r0)
|
||||
func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(systemName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _lookupPrivilegeName(_p0, luid, buffer, size)
|
||||
}
|
||||
|
||||
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -442,53 +205,27 @@ func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err err
|
||||
func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(systemName)
|
||||
if err != nil {
|
||||
return
|
||||
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
|
||||
var _p0 uint32
|
||||
if openAsSelf {
|
||||
_p0 = 1
|
||||
}
|
||||
return _lookupPrivilegeName(_p0, luid, buffer, size)
|
||||
}
|
||||
|
||||
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
|
||||
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(systemName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
|
||||
}
|
||||
|
||||
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
|
||||
func revertToSelf() (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -501,22 +238,14 @@ func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, proce
|
||||
var _p1 uint32
|
||||
if abort {
|
||||
_p1 = 1
|
||||
} else {
|
||||
_p1 = 0
|
||||
}
|
||||
var _p2 uint32
|
||||
if processSecurity {
|
||||
_p2 = 1
|
||||
} else {
|
||||
_p2 = 0
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -529,22 +258,162 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
|
||||
var _p1 uint32
|
||||
if abort {
|
||||
_p1 = 1
|
||||
} else {
|
||||
_p1 = 0
|
||||
}
|
||||
var _p2 uint32
|
||||
if processSecurity {
|
||||
_p2 = 1
|
||||
} else {
|
||||
_p2 = 0
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
|
||||
}
|
||||
|
||||
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
|
||||
handle = syscall.Handle(r0)
|
||||
if handle == syscall.InvalidHandle {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
|
||||
newport = syscall.Handle(r0)
|
||||
if newport == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
|
||||
}
|
||||
|
||||
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
|
||||
handle = syscall.Handle(r0)
|
||||
if handle == syscall.InvalidHandle {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getCurrentThread() (h syscall.Handle) {
|
||||
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
|
||||
h = syscall.Handle(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
|
||||
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
|
||||
ptr = uintptr(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func localFree(mem uintptr) {
|
||||
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
|
||||
return
|
||||
}
|
||||
|
||||
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
|
||||
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
|
||||
if r0 != 0 {
|
||||
winerr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
|
||||
var _p0 uint32
|
||||
if wait {
|
||||
_p0 = 1
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -552,11 +421,7 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
|
||||
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
|
||||
if r1 == socketError {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||

1 vendor/github.com/Microsoft/hcsshim/.gitattributes (generated, vendored, new file)
@@ -0,0 +1 @@
* text=auto eol=lf

2 vendor/github.com/Microsoft/hcsshim/.gitignore (generated, vendored)
@@ -1 +1,3 @@
*.exe
.idea
.vscode

17 vendor/github.com/Microsoft/hcsshim/.gometalinter.json (generated, vendored)
@@ -1,17 +0,0 @@
{
    "Vendor": true,
    "Deadline": "2m",
    "Sort": [
        "linter",
        "severity",
        "path",
        "line"
    ],
    "Skip": [
        "internal\\schema2"
    ],
    "EnableGC": true,
    "Enable": [
        "gofmt"
    ]
}

4 vendor/github.com/Microsoft/hcsshim/CODEOWNERS (generated, vendored)
@@ -1,3 +1 @@
* @microsoft/containerplat

/hcn/* @nagiesek
* @microsoft/containerplat

25 vendor/github.com/Microsoft/hcsshim/Protobuild.toml (generated, vendored)
@@ -25,6 +25,7 @@ plugins = ["grpc", "fieldpath"]
"gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
"google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/empty.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/struct.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
"google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
@@ -35,20 +36,14 @@ plugins = ["grpc", "fieldpath"]
prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"]
plugins = ["ttrpc"]

# Lock down runhcs config
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"]
plugins = ["ttrpc"]

[[descriptors]]
prefix = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
target = "cmd/containerd-shim-runhcs-v1/options/next.pb.txt"
ignore_files = [
"google/protobuf/descriptor.proto",
"gogoproto/gogo.proto"
]
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/ncproxyttrpc"]
plugins = ["ttrpc"]

[[descriptors]]
prefix = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats"
target = "cmd/containerd-shim-runhcs-v1/stats/next.pb.txt"
ignore_files = [
"google/protobuf/descriptor.proto",
"gogoproto/gogo.proto"
]
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/vmservice"]
plugins = ["ttrpc"]

2 vendor/github.com/Microsoft/hcsshim/README.md (generated, vendored)
@@ -1,6 +1,6 @@
# hcsshim

[](https://ci.appveyor.com/project/WindowsVirtualization/hcsshim/branch/master)
[](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster)

This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS).

43 vendor/github.com/Microsoft/hcsshim/appveyor.yml (generated, vendored)
@@ -1,43 +0,0 @@
version: 0.1.{build}

image: Visual Studio 2017

clone_folder: c:\gopath\src\github.com\Microsoft\hcsshim

environment:
  GOPATH: c:\gopath
  PATH: "%GOPATH%\\bin;C:\\gometalinter-2.0.12-windows-amd64;%PATH%"

stack: go 1.13.4

build_script:
  - appveyor DownloadFile https://github.com/alecthomas/gometalinter/releases/download/v2.0.12/gometalinter-2.0.12-windows-amd64.zip
  - 7z x gometalinter-2.0.12-windows-amd64.zip -y -oC:\ > NUL
  - gometalinter.exe --config .gometalinter.json ./...
  - go build ./cmd/containerd-shim-runhcs-v1
  - go build ./cmd/runhcs
  - go build ./cmd/tar2ext4
  - go build ./cmd/wclayer
  - go build ./internal/tools/grantvmgroupaccess
  - go build ./internal/tools/uvmboot
  - go build ./internal/tools/zapdir
  - go test -v ./... -tags admin
  - cd test
  - go test -v ./internal -tags admin
  - go test -c ./containerd-shim-runhcs-v1/ -tags functional
  - go test -c ./cri-containerd/ -tags functional
  - go test -c ./functional/ -tags functional
  - go test -c ./runhcs/ -tags functional

artifacts:
  - path: 'containerd-shim-runhcs-v1.exe'
  - path: 'runhcs.exe'
  - path: 'tar2ext4.exe'
  - path: 'wclayer.exe'
  - path: 'grantvmgroupaccess.exe'
  - path: 'uvmboot.exe'
  - path: 'zapdir.exe'
  - path: './test/containerd-shim-runhcs-v1.test.exe'
  - path: './test/cri-containerd.test.exe'
  - path: './test/functional.test.exe'
  - path: './test/runhcs.test.exe'

38 vendor/github.com/Microsoft/hcsshim/computestorage/attach.go (generated, vendored, new file)
@@ -0,0 +1,38 @@
package computestorage

import (
	"context"
	"encoding/json"

	"github.com/Microsoft/hcsshim/internal/oc"
	"github.com/pkg/errors"
	"go.opencensus.io/trace"
)

// AttachLayerStorageFilter sets up the layer storage filter on a writable
// container layer.
//
// `layerPath` is a path to a directory the writable layer is mounted. If the
// path does not end in a `\` the platform will append it automatically.
//
// `layerData` is the parent read-only layer data.
func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) {
	title := "hcsshim.AttachLayerStorageFilter"
	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
	defer span.End()
	defer func() { oc.SetSpanStatus(span, err) }()
	span.AddAttributes(
		trace.StringAttribute("layerPath", layerPath),
	)

	bytes, err := json.Marshal(layerData)
	if err != nil {
		return err
	}

	err = hcsAttachLayerStorageFilter(layerPath, string(bytes))
	if err != nil {
		return errors.Wrap(err, "failed to attach layer storage filter")
	}
	return nil
}

26 vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go (generated, vendored, new file)
@@ -0,0 +1,26 @@
package computestorage

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/oc"
	"github.com/pkg/errors"
	"go.opencensus.io/trace"
)

// DestroyLayer deletes a container layer.
//
// `layerPath` is a path to a directory containing the layer to export.
func DestroyLayer(ctx context.Context, layerPath string) (err error) {
	title := "hcsshim.DestroyLayer"
	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
	defer span.End()
	defer func() { oc.SetSpanStatus(span, err) }()
	span.AddAttributes(trace.StringAttribute("layerPath", layerPath))

	err = hcsDestroyLayer(layerPath)
	if err != nil {
		return errors.Wrap(err, "failed to destroy layer")
	}
	return nil
}

26 vendor/github.com/Microsoft/hcsshim/computestorage/detach.go (generated, vendored, new file)
@@ -0,0 +1,26 @@
package computestorage

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/oc"
	"github.com/pkg/errors"
	"go.opencensus.io/trace"
)

// DetachLayerStorageFilter detaches the layer storage filter on a writable container layer.
//
// `layerPath` is a path to a directory containing the layer to export.
func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) {
	title := "hcsshim.DetachLayerStorageFilter"
	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
	defer span.End()
	defer func() { oc.SetSpanStatus(span, err) }()
	span.AddAttributes(trace.StringAttribute("layerPath", layerPath))

	err = hcsDetachLayerStorageFilter(layerPath)
	if err != nil {
		return errors.Wrap(err, "failed to detach layer storage filter")
	}
	return nil
}
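
For orientation only (not part of the diff): a minimal sketch of calling the new computestorage helpers shown above. The layer path is an assumed example; the function names and signatures are taken from the files in this diff.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/computestorage"
)

func main() {
	ctx := context.Background()
	scratch := `C:\layers\scratch` // illustrative path

	// Detach the storage filter first, then delete the layer directory,
	// using the DetachLayerStorageFilter and DestroyLayer helpers above.
	if err := computestorage.DetachLayerStorageFilter(ctx, scratch); err != nil {
		fmt.Println("detach layer storage filter:", err)
	}
	if err := computestorage.DestroyLayer(ctx, scratch); err != nil {
		fmt.Println("destroy layer:", err)
	}
}
```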

46 vendor/github.com/Microsoft/hcsshim/computestorage/export.go (generated, vendored, new file)
@@ -0,0 +1,46 @@
package computestorage

import (
	"context"
	"encoding/json"

	"github.com/Microsoft/hcsshim/internal/oc"
	"github.com/pkg/errors"
	"go.opencensus.io/trace"
)

// ExportLayer exports a container layer.
//
// `layerPath` is a path to a directory containing the layer to export.
//
// `exportFolderPath` is a pre-existing folder to export the layer to.
//
// `layerData` is the parent layer data.
//
// `options` are the export options applied to the exported layer.
func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) {
	title := "hcsshim.ExportLayer"
	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
	defer span.End()
	defer func() { oc.SetSpanStatus(span, err) }()
	span.AddAttributes(
		trace.StringAttribute("layerPath", layerPath),
		trace.StringAttribute("exportFolderPath", exportFolderPath),
	)

	ldbytes, err := json.Marshal(layerData)
	if err != nil {
		return err
	}

	obytes, err := json.Marshal(options)
	if err != nil {
		return err
	}

	err = hcsExportLayer(layerPath, exportFolderPath, string(ldbytes), string(obytes))
	if err != nil {
		return errors.Wrap(err, "failed to export layer")
	}
	return nil
}

26 vendor/github.com/Microsoft/hcsshim/computestorage/format.go (generated, vendored, new file)
@@ -0,0 +1,26 @@
package computestorage

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/oc"
	"github.com/pkg/errors"
	"go.opencensus.io/trace"
	"golang.org/x/sys/windows"
)

// FormatWritableLayerVhd formats a virtual disk for use as a writable container layer.
//
// If the VHD is not mounted it will be temporarily mounted.
func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) {
	title := "hcsshim.FormatWritableLayerVhd"
	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
	defer span.End()
	defer func() { oc.SetSpanStatus(span, err) }()

	err = hcsFormatWritableLayerVhd(vhdHandle)
	if err != nil {
		return errors.Wrap(err, "failed to format writable layer vhd")
	}
	return nil
}
Some files were not shown because too many files have changed in this diff.