Compare commits

..

9 Commits

Author SHA1 Message Date
Daniel J Walsh
e72dd9c5c8 Merge pull request #1202 from containers/skopeo_v1.2.2_try2
Skopeo v1.2.2
2021-02-18 18:05:20 -05:00
TomSweeneyRedHat
b94b7dc0f0 Bump to Skopeo v1.2.2
Signed-off-by: TomSweeneyRedHat <tsweeney@redhat.com>
2021-02-18 17:44:16 -05:00
Daniel J Walsh
3abb778b4d Merge pull request #1197 from containers/dev/tsweeney/vendordance
Bump c/common c/image and c/storage
2021-02-17 19:24:05 -05:00
TomSweeneyRedHat
f78bf42c12 Bump c/common c/image and c/storage to latest
Signed-off-by: TomSweeneyRedHat <tsweeney@redhat.com>
2021-02-17 16:13:37 -05:00
TomSweeneyRedHat
b4210c0ba0 Fix gating test in release-1.2 port #1169
Cherry pick of #1169 to get gating tests happy.
Addresses: https://bugzilla.redhat.com/show_bug.cgi?id=1914884

Signed-off-by: TomSweeneyRedHat <tsweeney@redhat.com>
2021-02-14 12:24:07 +01:00
Miloslav Trmač
6c0e35a50c Merge pull request #1187 from containers/dev/tsweeney/protobuf1.2
Bump vendor/modules.txt in release-1.2
2021-02-05 12:15:26 +01:00
TomSweeneyRedHat
af2d2b416f Bump vendor/modules.txt in release-1.2
Signed-off-by: TomSweeneyRedHat <tsweeney@redhat.com>
2021-02-04 20:18:37 -05:00
Daniel J Walsh
a05ddb8d1d Merge pull request #1179 from containers/crypto_bump_release-1.2
Bump golang.org/x/crypto to the latest
2021-02-01 08:50:24 -05:00
TomSweeneyRedHat
8237787b53 Bump golang.org/x/crypto to the latest
Signed-off-by: TomSweeneyRedHat <tsweeney@redhat.com>
2021-01-30 18:45:06 -05:00
1584 changed files with 42247 additions and 129084 deletions

View File

@@ -1,213 +0,0 @@
---
# Main collection of env. vars to set for all tasks and scripts.
env:
####
#### Global variables used for all tasks
####
# Name of the ultimate destination branch for this CI run, PR or post-merge.
DEST_BRANCH: "main"
# Overrides default location (/tmp/cirrus) for repo clone
GOPATH: &gopath "/var/tmp/go"
GOBIN: "${GOPATH}/bin"
GOCACHE: "${GOPATH}/cache"
GOSRC: &gosrc "/var/tmp/go/src/github.com/containers/skopeo"
# Required for consistency with containers/image CI
SKOPEO_PATH: *gosrc
CIRRUS_WORKING_DIR: *gosrc
# The default is 'sh' if unspecified
CIRRUS_SHELL: "/bin/bash"
# Save a little typing (path relative to $CIRRUS_WORKING_DIR)
SCRIPT_BASE: "./contrib/cirrus"
####
#### Cache-image names to test with (double-quotes around names are critical)
####
FEDORA_NAME: "fedora-34"
PRIOR_FEDORA_NAME: "fedora-33"
UBUNTU_NAME: "ubuntu-2104"
PRIOR_UBUNTU_NAME: "ubuntu-2010"
# Google-cloud VM Images
IMAGE_SUFFIX: "c6248193773010944"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${IMAGE_SUFFIX}"
# Container FQIN's
FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
PRIOR_FEDORA_CONTAINER_FQIN: "quay.io/libpod/prior-fedora_podman:${IMAGE_SUFFIX}"
UBUNTU_CONTAINER_FQIN: "quay.io/libpod/ubuntu_podman:${IMAGE_SUFFIX}"
PRIOR_UBUNTU_CONTAINER_FQIN: "quay.io/libpod/prior-ubuntu_podman:${IMAGE_SUFFIX}"
# Equivalent to the image produced by 'make build-container'
SKOPEO_CI_CONTAINER_FQIN: "quay.io/skopeo/ci:${DEST_BRANCH}"
# Default timeout for each task
timeout_in: 45m
gcp_credentials: ENCRYPTED[52d9e807b531b37ab14e958cb5a72499460663f04c8d73e22ad608c027a31118420f1c80f0be0882fbdf96f49d8f9ac0]
validate_task:
# The git-validation tool doesn't work well on branch or tag push,
# under Cirrus-CI, due to challenges obtaining the starting commit ID.
# Only do validation for PRs.
only_if: $CIRRUS_PR != ''
container: &build_container
image: "${SKOPEO_CI_CONTAINER_FQIN}"
cpu: 4
memory: 8
script: |
make validate-local
make vendor && hack/tree_status.sh
doccheck_task:
only_if: $CIRRUS_PR != ''
depends_on:
- validate
container:
image: "${FEDORA_CONTAINER_FQIN}"
cpu: 4
memory: 8
env:
BUILDTAGS: &withopengpg 'btrfs_noversion libdm_no_deferred_remove containers_image_openpgp'
script: |
# TODO: Can't use 'runner.sh setup' inside container. However,
# removing the pre-installed package is the only necessary step
# at the time of this comment.
dnf erase -y skopeo # Guarantee non-interference
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" doccheck
osx_task:
only_if: &not_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
depends_on:
- validate
macos_instance:
image: catalina-xcode
setup_script: |
export PATH=$GOPATH/bin:$PATH
brew update
brew install gpgme go go-md2man
go get -u golang.org/x/lint/golint
test_script: |
export PATH=$GOPATH/bin:$PATH
go version
go env
make validate-local test-unit-local bin/skopeo
sudo make install
/usr/local/bin/skopeo -v
cross_task:
alias: cross
only_if: *not_docs
depends_on:
- validate
gce_instance:
image_project: libpod-218412
zone: "us-central1-f"
cpu: 2
memory: "4Gb"
# Required to be 200gig, do not modify - has i/o performance impact
# according to gcloud CLI tool warning messages.
disk: 200
image_name: ${FEDORA_CACHE_IMAGE_NAME}
env:
BUILDTAGS: *withopengpg
setup_script: >-
"${GOSRC}/${SCRIPT_BASE}/runner.sh" setup
cross_script: >-
"${GOSRC}/${SCRIPT_BASE}/runner.sh" cross
#####
##### NOTE: This task is substantially duplicated in the containers/image
##### repository's `.cirrus.yml`. Changes made here should be fully merged
##### prior to being manually duplicated and maintained in containers/image.
#####
test_skopeo_task:
alias: test_skopeo
only_if: *not_docs
depends_on:
- validate
gce_instance:
image_project: libpod-218412
zone: "us-central1-f"
cpu: 2
memory: "4Gb"
# Required to be 200gig, do not modify - has i/o performance impact
# according to gcloud CLI tool warning messages.
disk: 200
image_name: ${FEDORA_CACHE_IMAGE_NAME}
matrix:
- name: "Skopeo Test"
env:
BUILDTAGS: 'btrfs_noversion libdm_no_deferred_remove'
- name: "Skopeo Test w/ opengpg"
env:
BUILDTAGS: *withopengpg
setup_script: >-
"${GOSRC}/${SCRIPT_BASE}/runner.sh" setup
vendor_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" vendor
build_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
unit_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" unit
integration_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" integration
system_script: >
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" system
# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
meta_task:
name: "VM img. keepalive"
alias: meta
container: &smallcontainer
cpu: 2
memory: 2
image: quay.io/libpod/imgts:$IMAGE_SUFFIX
env:
# Space-separated list of images used by this repository state
IMGNAMES: >-
${FEDORA_CACHE_IMAGE_NAME}
${PRIOR_FEDORA_CACHE_IMAGE_NAME}
${UBUNTU_CACHE_IMAGE_NAME}
${PRIOR_UBUNTU_CACHE_IMAGE_NAME}
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_REPO_NAME}"
GCPJSON: ENCRYPTED[6867b5a83e960e7c159a98fe6c8360064567a071c6f4b5e7d532283ecd870aa65c94ccd74bdaa9bf7aadac9d42e20a67]
GCPNAME: ENCRYPTED[1cf558ae125e3c39ec401e443ad76452b25d790c45eb73d77c83eb059a0f7fd5085ef7e2f7e410b04ea6e83b0aab2eb1]
GCPPROJECT: libpod-218412
clone_script: &noop mkdir -p "$CIRRUS_WORKING_DIR"
script: /usr/local/bin/entrypoint.sh
# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
# of this task.
success_task:
name: "Total Success"
alias: success
# N/B: ALL tasks must be listed here, minus their '_task' suffix.
depends_on:
- validate
- doccheck
- osx
- cross
- test_skopeo
- meta
container: *smallcontainer
env:
CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
TEST_ENVIRON: container
clone_script: *noop
script: /bin/true

View File

@@ -1,10 +0,0 @@
version: 2
updates:
- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
time: "10:00"
timezone: Europe/Berlin
open-pull-requests-limit: 10

View File

@@ -1,209 +0,0 @@
---
# Please see contrib/<reponame>image/README.md for details on the intentions
# of this workflow.
#
# BIG FAT WARNING: This workflow is duplicated across containers/skopeo,
# containers/buildah, and containers/podman. ANY AND
# ALL CHANGES MADE HERE MUST BE MANUALLY DUPLICATED
# TO THE OTHER REPOS.
name: build multi-arch images
on:
# Upstream tends to be very active, with many merges per day.
# Only run this daily via cron schedule, or manually, not by branch push.
schedule:
- cron: '0 8 * * *'
# allows running this workflow manually from the Actions tab
workflow_dispatch:
jobs:
multi:
name: multi-arch image build
env:
REPONAME: skopeo # No easy way to parse this out of $GITHUB_REPOSITORY
# Server/namespace value used to format FQIN
REPONAME_QUAY_REGISTRY: quay.io/skopeo
CONTAINERS_QUAY_REGISTRY: quay.io/containers
# list of architectures for build
PLATFORMS: linux/amd64,linux/s390x,linux/ppc64le,linux/arm64
# Command to execute in container to obtain project version number
VERSION_CMD: "--version" # skopeo is the entrypoint
# build several images (upstream, testing, stable) in parallel
strategy:
# By default, failure of one matrix item cancels all others
fail-fast: false
matrix:
# Builds are located under contrib/<reponame>image/<source> directory
source:
- upstream
- testing
- stable
runs-on: ubuntu-latest
# internal registry caches build for inspection before push
services:
registry:
image: quay.io/libpod/registry:2
ports:
- 5000:5000
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
driver-opts: network=host
install: true
- name: Build and locally push image
uses: docker/build-push-action@v2
with:
context: contrib/${{ env.REPONAME }}image/${{ matrix.source }}
file: ./contrib/${{ env.REPONAME }}image/${{ matrix.source }}/Dockerfile
platforms: ${{ env.PLATFORMS }}
push: true
tags: localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}
# Simple verification that stable images work, and
# also grab the version number used in forming the FQIN.
- name: amd64 container sniff test
if: matrix.source == 'stable'
id: sniff_test
run: |
podman pull --tls-verify=false \
localhost:5000/$REPONAME/${{ matrix.source }}
VERSION_OUTPUT=$(podman run \
localhost:5000/$REPONAME/${{ matrix.source }} \
$VERSION_CMD)
echo "$VERSION_OUTPUT"
VERSION=$(awk -r -e "/^${REPONAME} version /"'{print $3}' <<<"$VERSION_OUTPUT")
test -n "$VERSION"
echo "::set-output name=version::$VERSION"
- name: Generate image FQIN(s) to push
id: reponame_reg
run: |
if [[ "${{ matrix.source }}" == 'stable' ]]; then
# The command version in image just built
VERSION='v${{ steps.sniff_test.outputs.version }}'
# workaround vim syntax-highlight bug: '
# Push both new|updated version-tag and latest-tag FQINs
FQIN="$REPONAME_QUAY_REGISTRY/stable:$VERSION,$REPONAME_QUAY_REGISTRY/stable:latest"
elif [[ "${{ matrix.source }}" == 'testing' ]]; then
# Assume some contents changed, always push latest testing.
FQIN="$REPONAME_QUAY_REGISTRY/testing:latest"
elif [[ "${{ matrix.source }}" == 'upstream' ]]; then
# Assume some contents changed, always push latest upstream.
FQIN="$REPONAME_QUAY_REGISTRY/upstream:latest"
else
echo "::error::Unknown matrix item '${{ matrix.source }}'"
exit 1
fi
echo "::warning::Pushing $FQIN"
echo "::set-output name=fqin::${FQIN}"
echo '::set-output name=push::true'
# This is substantially similar to the above logic,
# but only handles $CONTAINERS_QUAY_REGISTRY for
# the stable "latest" and named-version tagged images.
- name: Generate containers reg. image FQIN(s)
if: matrix.source == 'stable'
id: containers_reg
run: |
VERSION='v${{ steps.sniff_test.outputs.version }}'
# workaround vim syntax-highlight bug: '
# Push both new|updated version-tag and latest-tag FQINs
FQIN="$CONTAINERS_QUAY_REGISTRY/$REPONAME:$VERSION,$CONTAINERS_QUAY_REGISTRY/$REPONAME:latest"
echo "::warning::Pushing $FQIN"
echo "::set-output name=fqin::${FQIN}"
echo '::set-output name=push::true'
- name: Define LABELS multi-line env. var. value
run: |
# This is a really hacky/strange workflow idiom, required
# for setting multi-line $LABELS value for consumption in
# a future step. There is literally no cleaner way to do this :<
# https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#multiline-strings
function set_labels() {
echo 'LABELS<<DELIMITER' >> "$GITHUB_ENV"
for line; do
echo "$line" | tee -a "$GITHUB_ENV"
done
echo "DELIMITER" >> "$GITHUB_ENV"
}
declare -a lines
lines=(\
"org.opencontainers.image.source=https://github.com/${GITHUB_REPOSITORY}.git"
"org.opencontainers.image.revision=${GITHUB_SHA}"
"org.opencontainers.image.created=$(date -u --iso-8601=seconds)"
)
# Only the 'stable' matrix source obtains $VERSION
if [[ "${{ matrix.source }}" == "stable" ]]; then
lines+=(\
"org.opencontainers.image.version=${{ steps.sniff_test.outputs.version }}"
)
fi
set_labels "${lines[@]}"
# Separate steps to login and push for $REPONAME_QUAY_REGISTRY and
# $CONTAINERS_QUAY_REGISTRY are required, because 2 sets of credentials
# are used and namespaced within the registry. At the same time, reuse
# of non-shell steps is not supported by Github Actions nor are YAML
# anchors/aliases, nor composite actions.
# Push to $REPONAME_QUAY_REGISTRY for stable, testing, and upstream
- name: Login to ${{ env.REPONAME_QUAY_REGISTRY }}
uses: docker/login-action@v1
if: steps.reponame_reg.outputs.push == 'true'
with:
registry: ${{ env.REPONAME_QUAY_REGISTRY }}
# N/B: Secrets are not passed to workflows that are triggered
# by a pull request from a fork
username: ${{ secrets.REPONAME_QUAY_USERNAME }}
password: ${{ secrets.REPONAME_QUAY_PASSWORD }}
- name: Push images to ${{ steps.reponame_reg.outputs.fqin }}
uses: docker/build-push-action@v2
if: steps.reponame_reg.outputs.push == 'true'
with:
cache-from: type=registry,ref=localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}
cache-to: type=inline
context: contrib/${{ env.REPONAME }}image/${{ matrix.source }}
file: ./contrib/${{ env.REPONAME }}image/${{ matrix.source }}/Dockerfile
platforms: ${{ env.PLATFORMS }}
push: true
tags: ${{ steps.reponame_reg.outputs.fqin }}
labels: |
${{ env.LABELS }}
# Push only stable to $CONTAINERS_QUAY_REGISTRY
- name: Login to ${{ env.CONTAINERS_QUAY_REGISTRY }}
if: steps.containers_reg.outputs.push == 'true'
uses: docker/login-action@v1
with:
registry: ${{ env.CONTAINERS_QUAY_REGISTRY}}
username: ${{ secrets.CONTAINERS_QUAY_USERNAME }}
password: ${{ secrets.CONTAINERS_QUAY_PASSWORD }}
- name: Push images to ${{ steps.containers_reg.outputs.fqin }}
if: steps.containers_reg.outputs.push == 'true'
uses: docker/build-push-action@v2
with:
cache-from: type=registry,ref=localhost:5000/${{ env.REPONAME }}/${{ matrix.source }}
cache-to: type=inline
context: contrib/${{ env.REPONAME }}image/${{ matrix.source }}
file: ./contrib/${{ env.REPONAME }}image/${{ matrix.source }}/Dockerfile
platforms: ${{ env.PLATFORMS }}
push: true
tags: ${{ steps.containers_reg.outputs.fqin }}
labels: |
${{ env.LABELS }}

View File

@@ -1,25 +0,0 @@
name: Mark stale issues and pull requests
# Please refer to https://github.com/actions/stale/blob/master/action.yml
# to see all config knobs of the stale action.
on:
schedule:
- cron: "0 0 * * *"
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'A friendly reminder that this issue had no activity for 30 days.'
stale-pr-message: 'A friendly reminder that this PR had no activity for 30 days.'
stale-issue-label: 'stale-issue'
stale-pr-label: 'stale-pr'
days-before-stale: 30
days-before-close: 365
remove-stale-when-updated: true

3
.gitignore vendored
View File

@@ -5,6 +5,3 @@ result
# ignore JetBrains IDEs (GoLand) config folder
.idea
# Ignore the bin directory
bin

101
.travis.yml Normal file
View File

@@ -0,0 +1,101 @@
language: go
go:
- 1.15.x
notifications:
email: false
env:
global:
# The multiarch manifest will support architectures from this list. It should list the same architectures as the image-build-push step in this Travis config
- MULTIARCH_MANIFEST_ARCHITECTURES="amd64 s390x ppc64le"
# env variables for stable image build
- STABLE_IMAGE=quay.io/skopeo/stable:v1.2.0
- EXTRA_STABLE_IMAGE=quay.io/containers/skopeo:v1.2.0
# env variable for upstream image build
- UPSTREAM_IMAGE=quay.io/skopeo/upstream:master
# Just declaration of the image-build-push step with script actions to execute
x_base_steps:
- &image-build-push
services:
- docker
os: linux
dist: focal
script:
# skopeo upstream image build
- make -f release/Makefile build-image/upstream
# Push the image if the build is started via a cron job or code is pushed to master
- if [ "$TRAVIS_EVENT_TYPE" == "push" ] || [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then
make -f release/Makefile push-image/upstream ;
fi
# skopeo stable image build
- make -f release/Makefile build-image/stable
# Push the image if the build is started via a cron job or code is pushed to master
- if [ "$TRAVIS_EVENT_TYPE" == "push" ] || [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then
make -f release/Makefile push-image/stable ;
fi
# Just declaration of stage order to run
stages:
# Test for local build
- local-build
# Build and push image for 1 architecture
- name: image-build-push
if: branch = master
# Create and push image manifest to have multiarch image on top of architecture specific images
- name: manifest-multiarch-push
if: (type IN (push, cron)) AND branch = master
# Actual execution of steps
jobs:
include:
# Run 2 local-build steps in parallel for osx and linux/amd64 platforms
- stage: local-build
<<: *local-build
name: local build for osx
os: osx
osx_image: xcode11.3
install:
# Ideally, the (brew update) should not be necessary and Travis would have fairly
# frequently updated OS images; that's not been the case historically.
# In particular, explicitly unlink python@2, which has been removed from Homebrew
# since the last OS image build (as of July 2020), but the Travis OS still
# contains it, and it prevents updating of Python 3.
- brew update && brew unlink python@2 && brew install gpgme
script:
- hack/travis_osx.sh
- stage: local-build
<<: *local-build
name: local build for linux
os: linux
services:
- docker
script:
- make vendor && ./hack/tree_status.sh && make local-cross && make check
# Run 3 image-build-push tasks in parallel for linux/amd64, linux/s390x and linux/ppc64le platforms (for upstream and stable)
- stage: image-build-push
<<: *image-build-push
name: images for amd64
arch: amd64
- stage: image-build-push
<<: *image-build-push
name: images for s390x
arch: s390x
- stage: image-build-push
<<: *image-build-push
name: images for ppc64le
arch: ppc64le
# Run final task to generate multi-arch image manifests (for upstream and stable)
- stage: manifest-multiarch-push
os: linux
dist: focal
script:
- make -f release/Makefile push-manifest-multiarch/upstream
- make -f release/Makefile push-manifest-multiarch/stable

View File

@@ -1,3 +1,3 @@
## The skopeo Project Community Code of Conduct
The skopeo project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
The skopeo project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).

View File

@@ -117,31 +117,29 @@ commit automatically with `git commit -s`.
### Dependencies management
Dependencies are managed via [standard go modules](https://golang.org/ref/mod).
Make sure [`vndr`](https://github.com/LK4D4/vndr) is installed.
In order to add a new dependency to this project:
- use `go get -d path/to/dep@version` to add a new line to `go.mod`
- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
- run `make vendor`
In order to update an existing dependency:
- use `go get -d -u path/to/dep@version` to update the relevant dependency line in `go.mod`
- update the relevant dependency line in `vendor.conf`
- run `make vendor`
When new PRs for [containers/image](https://github.com/containers/image) break `skopeo` (i.e. `containers/image` tests fail in `make test-skopeo`):
- create a new branch in your `skopeo` checkout and switch to it
- find out the version of `containers/image` you want to use and note its commit ID. You might also want to use a fork of `containers/image`, in that case note its repo
- use `go get -d github.com/$REPO/image/v5@$COMMIT_ID` to download the right version. The command will fetch the dependency and then fail because of a conflict in `go.mod`; this is expected. Note the pseudo-version (e.g. `v5.13.1-0.20210707123201-50afbf0a326`)
- use `go mod edit -replace=github.com/containers/image/v5=github.com/$REPO/image/v5@$PSEUDO_VERSION` to add a replacement line to `go.mod` (e.g. `replace github.com/containers/image/v5 => github.com/moio/image/v5 v5.13.1-0.20210707123201-50afbf0a3262`)
- update `vendor.conf`. Find out the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
- run `make vendor`
- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for changed `containers/image` API)
- optionally add new integration tests to the skopeo repo
- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
- iterate until tests pass and the PR is reviewed
- then the original `containers/image` PR can be merged, disregarding its `make test-skopeo` failure
- as soon as possible after that, in the skopeo PR, use `go mod edit -dropreplace=github.com/containers/image` to remove the `replace` line in `go.mod`
- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
- run `make vendor`
- update the skopeo PR with the result, drop the “DO NOT MERGE” marking
- after tests complete successfully again, merge the skopeo PR
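
As a rough illustration of the replace workflow described above, the commands might look like the following sketch; the fork path `github.com/example/image`, the commit ID, and the pseudo-version are placeholders, not real references:
```console
$ # Fetch the containers/image branch under test (placeholder fork and commit)
$ go get -d github.com/example/image/v5@0123456789ab
$ # The command fails with a go.mod conflict but prints the pseudo-version to use
$ go mod edit -replace=github.com/containers/image/v5=github.com/example/image/v5@v5.0.1-0.20210101000000-0123456789ab
$ make vendor
$ # ...iterate on the skopeo PR until tests pass and it is reviewed...
$ # After the containers/image PR merges, drop the replacement and re-vendor
$ go mod edit -dropreplace=github.com/containers/image/v5
$ make vendor
```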

View File

@@ -1,4 +1,4 @@
FROM registry.fedoraproject.org/fedora:latest
FROM fedora
RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-md2man \
# storage deps
@@ -21,7 +21,6 @@ RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-md2ma
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
RUN set -x \
&& export GO111MODULE=off \
&& REGISTRY_COMMIT_SCHEMA1=ec87e9b6971d831f0eff752ddb54fb64693e51cd \
&& REGISTRY_COMMIT=47a064d4195a9b56133891bbb13620c3ac83a827 \
&& export GOPATH="$(mktemp -d)" \
@@ -35,7 +34,6 @@ RUN set -x \
&& rm -rf "$GOPATH"
RUN set -x \
&& export GO111MODULE=off \
&& export GOPATH=$(mktemp -d) \
&& git clone --depth 1 -b v1.5.0-alpha.3 git://github.com/openshift/origin "$GOPATH/src/github.com/openshift/origin" \
# The sed edits out a "go < 1.5" check which works incorrectly with go ≥ 1.10. \
@@ -48,7 +46,6 @@ RUN set -x \
ENV GOPATH /usr/share/gocode:/go
ENV PATH $GOPATH/bin:/usr/share/gocode/bin:$PATH
ENV container_magic 85531765-346b-4316-bdb8-358e4cca9e5d
RUN go version
WORKDIR /go/src/github.com/containers/skopeo
COPY . /go/src/github.com/containers/skopeo

View File

@@ -1,12 +1,9 @@
FROM registry.fedoraproject.org/fedora:33
FROM golang:1.14-buster
RUN dnf update -y && \
dnf install -y \
btrfs-progs-devel \
device-mapper-devel \
golang \
gpgme-devel \
make
RUN apt-get update && \
apt-get install -y \
libdevmapper-dev \
libgpgme11-dev
ENV GOPATH=/
WORKDIR /src/github.com/containers/skopeo

View File

@@ -2,23 +2,26 @@
export GOPROXY=https://proxy.golang.org
# On some platforms (e.g. macOS, FreeBSD) gpgme is installed in /usr/local/ but /usr/local/include/ is
# not in the default search path. Rather than hard-code this directory, use gpgme-config.
# Sadly that must be done at the top-level user instead of locally in the gpgme subpackage, because cgo
# supports only pkg-config, not general shell scripts, and gpgme does not install a pkg-config file.
ifeq ($(shell uname),Darwin)
PREFIX ?= ${DESTDIR}/usr/local
DARWIN_BUILD_TAG=
# On macOS, (brew install gpgme) installs it within /usr/local, but /usr/local/include is not in the default search path.
# Rather than hard-code this directory, use gpgme-config. Sadly that must be done at the top-level user
# instead of locally in the gpgme subpackage, because cgo supports only pkg-config, not general shell scripts,
# and gpgme does not install a pkg-config file.
# If gpgme is not installed or gpgme-config can't be found for other reasons, the error is silently ignored
# (and the user will probably find out because the cgo compilation will fail).
GPGME_ENV := CGO_CFLAGS="$(shell gpgme-config --cflags 2>/dev/null)" CGO_LDFLAGS="$(shell gpgme-config --libs 2>/dev/null)"
else
PREFIX ?= ${DESTDIR}/usr
endif
# The following variables very roughly follow https://www.gnu.org/prep/standards/standards.html#Makefile-Conventions .
DESTDIR ?=
PREFIX ?= /usr/local
CONTAINERSCONFDIR ?= /etc/containers
REGISTRIESDDIR ?= ${CONTAINERSCONFDIR}/registries.d
SIGSTOREDIR ?= /var/lib/containers/sigstore
BINDIR ?= ${PREFIX}/bin
MANDIR ?= ${PREFIX}/share/man
BASHCOMPLETIONSDIR ?= ${PREFIX}/share/bash-completion/completions
INSTALLDIR=${PREFIX}/bin
MANINSTALLDIR=${PREFIX}/share/man
CONTAINERSSYSCONFIGDIR=${DESTDIR}/etc/containers
REGISTRIESDDIR=${CONTAINERSSYSCONFIGDIR}/registries.d
SIGSTOREDIR=${DESTDIR}/var/lib/containers/sigstore
BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
GO ?= go
GOBIN := $(shell $(GO) env GOBIN)
@@ -29,10 +32,6 @@ ifeq ($(GOBIN),)
GOBIN := $(GOPATH)/bin
endif
# Required for integration-tests to detect they are running inside a specific
# container image. Env. var defined in image, make does not automatically
# pass to children unless explicitly exported
export container_magic
CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
GOMD2MAN ?= $(shell command -v go-md2man || echo '$(GOBIN)/go-md2man')
@@ -76,7 +75,7 @@ MANPAGES ?= $(MANPAGES_MD:%.md=%)
BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh) $(shell hack/btrfs_installed_tag.sh)
LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG)
LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(DARWIN_BUILD_TAG)
BUILDTAGS += $(LOCAL_BUILD_TAGS)
ifeq ($(DISABLE_CGO), 1)
@@ -115,11 +114,8 @@ binary: cmd/skopeo
# Update nix/nixpkgs.json its latest stable commit
.PHONY: nixpkgs
nixpkgs:
@nix run \
-f channel:nixos-21.05 nix-prefetch-git \
-c nix-prefetch-git \
--no-deepClone \
https://github.com/nixos/nixpkgs refs/heads/nixos-21.05 > nix/nixpkgs.json
@nix run -f channel:nixos-20.09 nix-prefetch-git -c nix-prefetch-git \
--no-deepClone https://github.com/nixos/nixpkgs > nix/nixpkgs.json
# Build statically linked binary
.PHONY: static
@@ -153,23 +149,23 @@ clean:
rm -rf bin docs/*.1
install: install-binary install-docs install-completions
install -d -m 755 ${DESTDIR}${SIGSTOREDIR}
install -d -m 755 ${DESTDIR}${CONTAINERSCONFDIR}
install -m 644 default-policy.json ${DESTDIR}${CONTAINERSCONFDIR}/policy.json
install -d -m 755 ${DESTDIR}${REGISTRIESDDIR}
install -m 644 default.yaml ${DESTDIR}${REGISTRIESDDIR}/default.yaml
install -d -m 755 ${SIGSTOREDIR}
install -d -m 755 ${CONTAINERSSYSCONFIGDIR}
install -m 644 default-policy.json ${CONTAINERSSYSCONFIGDIR}/policy.json
install -d -m 755 ${REGISTRIESDDIR}
install -m 644 default.yaml ${REGISTRIESDDIR}/default.yaml
install-binary: bin/skopeo
install -d -m 755 ${DESTDIR}${BINDIR}
install -m 755 bin/skopeo ${DESTDIR}${BINDIR}/skopeo
install -d -m 755 ${INSTALLDIR}
install -m 755 bin/skopeo ${INSTALLDIR}/skopeo
install-docs: docs
install -d -m 755 ${DESTDIR}${MANDIR}/man1
install -m 644 docs/*.1 ${DESTDIR}${MANDIR}/man1
install -d -m 755 ${MANINSTALLDIR}/man1
install -m 644 docs/*.1 ${MANINSTALLDIR}/man1/
install-completions:
install -m 755 -d ${DESTDIR}${BASHCOMPLETIONSDIR}
install -m 644 completions/bash/skopeo ${DESTDIR}${BASHCOMPLETIONSDIR}/skopeo
install -m 755 -d ${BASHINSTALLDIR}
install -m 644 completions/bash/skopeo ${BASHINSTALLDIR}/skopeo
shell: build-container
$(CONTAINER_RUN) bash
@@ -178,11 +174,7 @@ check: validate test-unit test-integration test-system
# The tests can run out of entropy and block in containers, so replace /dev/random.
test-integration: build-container
$(CONTAINER_RUN) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" $(MAKE) test-integration-local'
# Intended for CI, shortcut 'build-container' since already running inside container.
test-integration-local:
hack/make.sh test-integration
$(CONTAINER_RUN) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'
# complicated set of options needed to run podman-in-podman
test-system: build-container
@@ -190,35 +182,24 @@ test-system: build-container
$(CONTAINER_CMD) --privileged \
-v $$DTEMP:/var/lib/containers:Z -v /run/systemd/journal/socket:/run/systemd/journal/socket \
"$(IMAGE)" \
bash -c 'BUILDTAGS="$(BUILDTAGS)" $(MAKE) test-system-local'; \
bash -c 'BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-system'; \
rc=$$?; \
$(RM) -rf $$DTEMP; \
exit $$rc
# Intended for CI, shortcut 'build-container' since already running inside container.
test-system-local:
hack/make.sh test-system
test-unit: build-container
# Just call (make test-unit-local) here instead of worrying about environment differences
$(CONTAINER_RUN) make test-unit-local BUILDTAGS='$(BUILDTAGS)'
validate: build-container
$(CONTAINER_RUN) make validate-local
$(CONTAINER_RUN) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
# This target is only intended for development, e.g. executing it from an IDE. Use (make test) for CI or pre-release testing.
test-all-local: validate-local validate-docs test-unit-local
test-all-local: validate-local test-unit-local
.PHONY: validate-local
validate-local:
hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
# This invokes bin/skopeo, hence cannot be run as part of validate-local
.PHONY: validate-docs
validate-docs:
hack/man-page-checker
hack/xref-helpmsgs-manpages
test-unit-local:
$(GPGME_ENV) $(GO) test $(MOD_VENDOR) -tags "$(BUILDTAGS)" $$($(GO) list $(MOD_VENDOR) -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')
@@ -228,4 +209,4 @@ vendor:
$(GO) mod verify
vendor-in-container:
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.16 make vendor
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.13 make vendor

View File

@@ -166,11 +166,11 @@ $ skopeo sync --src docker --dest dir registry.example.com/busybox /media/usb
skopeo uses credentials from the --creds (for skopeo inspect|delete) or --src-creds|--dest-creds (for skopeo copy) flags, if set; otherwise it uses configuration set by skopeo login, podman login, buildah login, or docker login.
```console
$ skopeo login --username USER myregistrydomain.com:5000
$ skopeo login --username USER docker://myregistrydomain.com:5000
Password:
$ skopeo inspect docker://myregistrydomain.com:5000/busybox
{"Tag":"latest","Digest":"sha256:473bb2189d7b913ed7187a33d11e743fdc2f88931122a44d91a301b64419f092","RepoTags":["latest"],"Comment":"","Created":"2016-01-15T18:06:41.282540103Z","ContainerConfig":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"DockerVersion":"1.8.3","Author":"","Config":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"Architecture":"amd64","Os":"linux"}
$ skopeo logout myregistrydomain.com:5000
$ skopeo logout docker://myregistrydomain.com:5000
```
#### Using --creds directly

View File

@@ -1,3 +1,3 @@
## Security and Disclosure Information Policy for the skopeo Project
The skopeo Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
The skopeo Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.

View File

@@ -4,7 +4,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"github.com/containers/common/pkg/retry"
@@ -13,40 +12,38 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/spf13/cobra"
encconfig "github.com/containers/ocicrypt/config"
enchelpers "github.com/containers/ocicrypt/helpers"
"github.com/spf13/cobra"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
type copyOptions struct {
global *globalOptions
deprecatedTLSVerify *deprecatedTLSVerifyOption
srcImage *imageOptions
destImage *imageDestOptions
retryOpts *retry.RetryOptions
additionalTags []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
digestFile string // Write digest to this file
format optionalString // Force conversion of the image to a specified format
quiet bool // Suppress output information when copying images
all bool // Copy all of the images if the source is a list
encryptLayer []int // The list of layers to encrypt
encryptionKeys []string // Keys needed to encrypt the image
decryptionKeys []string // Keys needed to decrypt the image
global *globalOptions
srcImage *imageOptions
destImage *imageDestOptions
retryOpts *retry.RetryOptions
additionalTags []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
format optionalString // Force conversion of the image to a specified format
quiet bool // Suppress output information when copying images
all bool // Copy all of the images if the source is a list
encryptLayer []int // The list of layers to encrypt
encryptionKeys []string // Keys needed to encrypt the image
decryptionKeys []string // Keys needed to decrypt the image
}
func copyCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
deprecatedTLSVerifyFlags, deprecatedTLSVerifyOpt := deprecatedTLSVerifyFlags()
srcFlags, srcOpts := imageFlags(global, sharedOpts, deprecatedTLSVerifyOpt, "src-", "screds")
destFlags, destOpts := imageDestFlags(global, sharedOpts, deprecatedTLSVerifyOpt, "dest-", "dcreds")
srcFlags, srcOpts := imageFlags(global, sharedOpts, "src-", "screds")
destFlags, destOpts := imageDestFlags(global, sharedOpts, "dest-", "dcreds")
retryFlags, retryOpts := retryFlags()
opts := copyOptions{global: global,
deprecatedTLSVerify: deprecatedTLSVerifyOpt,
srcImage: srcOpts,
destImage: destOpts,
retryOpts: retryOpts,
srcImage: srcOpts,
destImage: destOpts,
retryOpts: retryOpts,
}
cmd := &cobra.Command{
Use: "copy [command options] SOURCE-IMAGE DESTINATION-IMAGE",
@@ -64,7 +61,6 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
adjustUsage(cmd)
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&deprecatedTLSVerifyFlags)
flags.AddFlagSet(&srcFlags)
flags.AddFlagSet(&destFlags)
flags.AddFlagSet(&retryFlags)
@@ -73,8 +69,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE-IMAGE")
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
flags.StringVar(&opts.digestFile, "digestfile", "", "Write the digest of the pushed image to the specified file")
flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source, with fallbacks)`)
flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)`)
flags.StringSliceVar(&opts.encryptionKeys, "encryption-key", []string{}, "*Experimental* key with the encryption protocol to use needed to encrypt the image (e.g. jwe:/path/to/key.pem)")
flags.IntSliceVar(&opts.encryptLayer, "encrypt-layer", []int{}, "*Experimental* the 0-indexed layer indices, with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer)")
flags.StringSliceVar(&opts.decryptionKeys, "decryption-key", []string{}, "*Experimental* key needed to decrypt the image")
@@ -85,7 +80,6 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
if len(args) != 2 {
return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
}
opts.deprecatedTLSVerify.warnIfUsed([]string{"--src-tls-verify", "--dest-tls-verify"})
imageNames := args
if err := reexecIfNecessaryForImages(imageNames...); err != nil {
@@ -118,9 +112,15 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
var manifestType string
if opts.format.present {
manifestType, err = parseManifestFormat(opts.format.value)
if err != nil {
return err
switch opts.format.value {
case "oci":
manifestType = imgspecv1.MediaTypeImageManifest
case "v2s1":
manifestType = manifest.DockerV2Schema1SignedMediaType
case "v2s2":
manifestType = manifest.DockerV2Schema2MediaType
default:
return fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", opts.format.value)
}
}
@@ -184,7 +184,7 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
}
return retry.RetryIfNecessary(ctx, func() error {
manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
RemoveSignatures: opts.removeSignatures,
SignBy: opts.signByFingerprint,
ReportWriter: stdout,
@@ -196,18 +196,6 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
OciEncryptLayers: encLayers,
OciEncryptConfig: encConfig,
})
if err != nil {
return err
}
if opts.digestFile != "" {
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
return err
}
if err = ioutil.WriteFile(opts.digestFile, []byte(manifestDigest.String()), 0644); err != nil {
return fmt.Errorf("Failed to write digest to file %q: %w", opts.digestFile, err)
}
}
return nil
return err
}, opts.retryOpts)
}

View File

@@ -20,7 +20,7 @@ type deleteOptions struct {
func deleteCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, nil, "", "")
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := deleteOptions{
global: global,

View File

@@ -34,7 +34,7 @@ type inspectOptions struct {
func inspectCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, nil, "", "")
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := inspectOptions{
global: global,
@@ -222,6 +222,13 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
return printTmpl(row, data)
}
func inspectNormalize(row string) string {
r := strings.NewReplacer(
".ImageID", ".Image",
)
return r.Replace(row)
}
func printTmpl(row string, data []interface{}) error {
t, err := template.New("skopeo inspect").Parse(row)
if err != nil {

View File

@@ -25,7 +25,7 @@ type layersOptions struct {
func layersCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, nil, "", "")
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := layersOptions{
global: global,

View File

@@ -30,7 +30,7 @@ type tagsOptions struct {
func tagsCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := dockerImageFlags(global, sharedOpts, nil, "", "")
imageFlags, imageOpts := dockerImageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := tagsOptions{

View File

@@ -5,7 +5,6 @@ import (
"github.com/containers/image/v5/transports/alltransports"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Tests the kinds of inputs allowed and expected to the command
@@ -18,10 +17,10 @@ func TestDockerRepositoryReferenceParser(t *testing.T) {
} {
ref, err := parseDockerRepositoryReference(test[0])
require.NoError(t, err)
expected, err := alltransports.ParseImageName(test[0])
require.NoError(t, err)
assert.Equal(t, expected.DockerReference().Name(), ref.DockerReference().Name(), "Mismatched parse result for input %v", test[0])
if assert.NoError(t, err, "Could not parse, got error on %v", test[0]) {
assert.Equal(t, expected.DockerReference().Name(), ref.DockerReference().Name(), "Mismatched parse result for input %v", test[0])
}
}
for _, test := range [][]string{

View File

@@ -12,6 +12,7 @@ import (
type loginOptions struct {
global *globalOptions
loginOpts auth.LoginOptions
getLogin optionalBool
tlsVerify optionalBool
}
@@ -20,7 +21,7 @@ func loginCmd(global *globalOptions) *cobra.Command {
global: global,
}
cmd := &cobra.Command{
Use: "login [command options] REGISTRY",
Use: "login",
Short: "Login to a container registry",
Long: "Login to a container registry on a specified server.",
RunE: commandAction(opts.run),
@@ -38,7 +39,6 @@ func (opts *loginOptions) run(args []string, stdout io.Writer) error {
defer cancel()
opts.loginOpts.Stdout = stdout
opts.loginOpts.Stdin = os.Stdin
opts.loginOpts.AcceptRepositories = true
sys := opts.global.newSystemContext()
if opts.tlsVerify.present {
sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)

View File

@@ -4,14 +4,12 @@ import (
"io"
"github.com/containers/common/pkg/auth"
"github.com/containers/image/v5/types"
"github.com/spf13/cobra"
)
type logoutOptions struct {
global *globalOptions
logoutOpts auth.LogoutOptions
tlsVerify optionalBool
}
func logoutCmd(global *globalOptions) *cobra.Command {
@@ -19,25 +17,19 @@ func logoutCmd(global *globalOptions) *cobra.Command {
global: global,
}
cmd := &cobra.Command{
Use: "logout [command options] REGISTRY",
Use: "logout",
Short: "Logout of a container registry",
Long: "Logout of a container registry on a specified server.",
RunE: commandAction(opts.run),
Example: `skopeo logout quay.io`,
}
adjustUsage(cmd)
flags := cmd.Flags()
optionalBoolFlag(flags, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the registry")
flags.AddFlagSet(auth.GetLogoutFlags(&opts.logoutOpts))
cmd.Flags().AddFlagSet(auth.GetLogoutFlags(&opts.logoutOpts))
return cmd
}
func (opts *logoutOptions) run(args []string, stdout io.Writer) error {
opts.logoutOpts.Stdout = stdout
opts.logoutOpts.AcceptRepositories = true
sys := opts.global.newSystemContext()
if opts.tlsVerify.present {
sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
}
return auth.Logout(sys, &opts.logoutOpts, args)
}

View File

@@ -17,8 +17,6 @@ import (
// and will be populated by the Makefile
var gitCommit = ""
var defaultUserAgent = "skopeo/" + version.Version
type globalOptions struct {
debug bool // Enable debug output
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
@@ -45,17 +43,6 @@ func createApp() (*cobra.Command, *globalOptions) {
},
SilenceUsage: true,
SilenceErrors: true,
// Currently, skopeo uses manually written completions. Cobra allows
// for auto-generating completions for various shells. Podman is
// already making use of that. If Skopeo decides to follow, please
// remove the line below (and hide the `completion` command).
CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
// This is documented to parse "local" (non-PersistentFlags) flags of parent commands before
// running subcommands and handling their options. We don't really run into such cases,
// because all of our flags on rootCommand are in PersistentFlags, except for the deprecated --tls-verify;
// in that case we need TraverseChildren so that we can distinguish between
// (skopeo --tls-verify inspect) (causes a warning) and (skopeo inspect --tls-verify) (no warning).
TraverseChildren: true,
}
if gitCommit != "" {
rootCommand.Version = fmt.Sprintf("%s commit: %s", version.Version, gitCommit)
@@ -66,6 +53,8 @@ func createApp() (*cobra.Command, *globalOptions) {
var dummyVersion bool
rootCommand.Flags().BoolVarP(&dummyVersion, "version", "v", false, "Version for Skopeo")
rootCommand.PersistentFlags().BoolVar(&opts.debug, "debug", false, "enable debug output")
flag := optionalBoolFlag(rootCommand.PersistentFlags(), &opts.tlsVerify, "tls-verify", "Require HTTPS and verify certificates when accessing the registry")
flag.Hidden = true
rootCommand.PersistentFlags().StringVar(&opts.policyPath, "policy", "", "Path to a trust policy file")
rootCommand.PersistentFlags().BoolVar(&opts.insecurePolicy, "insecure-policy", false, "run the tool without any policy check")
rootCommand.PersistentFlags().StringVar(&opts.registriesDirPath, "registries.d", "", "use registry configuration files in `DIR` (e.g. for container signature storage)")
@@ -78,8 +67,6 @@ func createApp() (*cobra.Command, *globalOptions) {
logrus.Fatal("unable to mark registries-conf flag as hidden")
}
rootCommand.PersistentFlags().StringVar(&opts.tmpDir, "tmpdir", "", "directory used to store temporary files")
flag := optionalBoolFlag(rootCommand.Flags(), &opts.tlsVerify, "tls-verify", "Require HTTPS and verify certificates when accessing the registry")
flag.Hidden = true
rootCommand.AddCommand(
copyCmd(&opts),
deleteCmd(&opts),
@@ -156,7 +143,6 @@ func (opts *globalOptions) newSystemContext() *types.SystemContext {
VariantChoice: opts.overrideVariant,
SystemRegistriesConfPath: opts.registriesConfPath,
BigFilesTemporaryDir: opts.tmpDir,
DockerRegistryUserAgent: defaultUserAgent,
}
// DEPRECATED: We support this for backward compatibility, but override it if a per-image flag is provided.
if opts.tlsVerify.present {

View File

@@ -23,10 +23,7 @@ func TestGlobalOptionsNewSystemContext(t *testing.T) {
// Default state
opts, _ := fakeGlobalOptions(t, []string{})
res := opts.newSystemContext()
assert.Equal(t, &types.SystemContext{
// User-Agent is set by default.
DockerRegistryUserAgent: defaultUserAgent,
}, res)
assert.Equal(t, &types.SystemContext{}, res)
// Set everything to non-default values.
opts, _ = fakeGlobalOptions(t, []string{
"--registries.d", "/srv/registries.d",
@@ -46,6 +43,5 @@ func TestGlobalOptionsNewSystemContext(t *testing.T) {
BigFilesTemporaryDir: "/srv",
SystemRegistriesConfPath: "/srv/registries.conf",
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
DockerRegistryUserAgent: defaultUserAgent,
}, res)
}

View File

@@ -16,7 +16,7 @@ type manifestDigestOptions struct {
func manifestDigestCmd() *cobra.Command {
var opts manifestDigestOptions
cmd := &cobra.Command{
Use: "manifest-digest MANIFEST-FILE",
Use: "manifest-digest MANIFEST",
Short: "Compute a manifest digest of a file",
RunE: commandAction(opts.run),
Example: "skopeo manifest-digest manifest.json",

View File

@@ -18,7 +18,7 @@ type standaloneSignOptions struct {
func standaloneSignCmd() *cobra.Command {
opts := standaloneSignOptions{}
cmd := &cobra.Command{
Use: "standalone-sign [command options] MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT --output|-o SIGNATURE",
Use: "standalone-sign [command options] MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT",
Short: "Create a signature using local files",
RunE: commandAction(opts.run),
}

View File

@@ -27,18 +27,16 @@ import (
// syncOptions contains information retrieved from the skopeo sync command line.
type syncOptions struct {
global *globalOptions // Global (not command dependent) skopeo options
deprecatedTLSVerify *deprecatedTLSVerifyOption
srcImage *imageOptions // Source image options
destImage *imageDestOptions // Destination image options
retryOpts *retry.RetryOptions
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
format optionalString // Force conversion of the image to a specified format
source string // Source repository name
destination string // Destination registry name
scoped bool // When true, namespace copied images at destination using the source repository name
all bool // Copy all of the images if an image in the source is a list
global *globalOptions // Global (not command dependent) skopeo options
srcImage *imageOptions // Source image options
destImage *imageDestOptions // Destination image options
retryOpts *retry.RetryOptions
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
source string // Source repository name
destination string // Destination registry name
scoped bool // When true, namespace copied images at destination using the source repository name
all bool // Copy all of the images if an image in the source is a list
}
// repoDescriptor contains information of a single repository used as a sync source.
@@ -48,7 +46,7 @@ type repoDescriptor struct {
Context *types.SystemContext // SystemContext for the sync command
}
// tlsVerifyConfig is an implementation of the Unmarshaler interface, used to
// tlsVerify is an implementation of the Unmarshaler interface, used to
// customize the unmarshaling behaviour of the tls-verify YAML key.
type tlsVerifyConfig struct {
skip types.OptionalBool // skip TLS verification check (false by default)
@@ -69,29 +67,27 @@ type sourceConfig map[string]registrySyncConfig
func syncCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
deprecatedTLSVerifyFlags, deprecatedTLSVerifyOpt := deprecatedTLSVerifyFlags()
srcFlags, srcOpts := dockerImageFlags(global, sharedOpts, deprecatedTLSVerifyOpt, "src-", "screds")
destFlags, destOpts := dockerImageFlags(global, sharedOpts, deprecatedTLSVerifyOpt, "dest-", "dcreds")
srcFlags, srcOpts := dockerImageFlags(global, sharedOpts, "src-", "screds")
destFlags, destOpts := dockerImageFlags(global, sharedOpts, "dest-", "dcreds")
retryFlags, retryOpts := retryFlags()
opts := syncOptions{
global: global,
deprecatedTLSVerify: deprecatedTLSVerifyOpt,
srcImage: srcOpts,
destImage: &imageDestOptions{imageOptions: destOpts},
retryOpts: retryOpts,
global: global,
srcImage: srcOpts,
destImage: &imageDestOptions{imageOptions: destOpts},
retryOpts: retryOpts,
}
cmd := &cobra.Command{
Use: "sync [command options] --src TRANSPORT --dest TRANSPORT SOURCE DESTINATION",
Use: "sync [command options] --src SOURCE-LOCATION --dest DESTINATION-LOCATION SOURCE DESTINATION",
Short: "Synchronize one or more images from one location to another",
Long: `Copy all the images from a SOURCE to a DESTINATION.
Long: fmt.Sprint(`Copy all the images from a SOURCE to a DESTINATION.
Allowed SOURCE transports (specified with --src): docker, dir, yaml.
Allowed DESTINATION transports (specified with --dest): docker, dir.
See skopeo-sync(1) for details.
`,
`),
RunE: commandAction(opts.run),
Example: `skopeo sync --src docker --dest dir --scoped registry.example.com/busybox /media/usb`,
}
@@ -99,20 +95,18 @@ See skopeo-sync(1) for details.
flags := cmd.Flags()
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE images")
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source, with fallbacks)`)
flags.StringVarP(&opts.source, "src", "s", "", "SOURCE transport type")
flags.StringVarP(&opts.destination, "dest", "d", "", "DESTINATION transport type")
flags.BoolVar(&opts.scoped, "scoped", false, "Images at DESTINATION are prefixed using the full source image path as scope")
flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&deprecatedTLSVerifyFlags)
flags.AddFlagSet(&srcFlags)
flags.AddFlagSet(&destFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
// UnmarshalYAML is the implementation of the Unmarshaler interface method
// unmarshalYAML is the implementation of the Unmarshaler interface method
// method for the tlsVerifyConfig type.
// It unmarshals the 'tls-verify' YAML key so that, when the key is not
// specified, tls verification is enforced.
@@ -211,6 +205,7 @@ func getImageTags(ctx context.Context, sysCtx *types.SystemContext, repoRef refe
// Some registries may decide to block the "list all tags" endpoint.
// Gracefully allow the sync to continue in this case.
logrus.Warnf("Registry disallows tag list retrieval: %s", err)
break
default:
return tags, errors.Wrapf(err, "Error determining repository tags for image %s", name)
}
@@ -243,7 +238,7 @@ func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]
return sourceReferences, nil
}
// imagesToCopyFromDir builds a list of image references from the images found
// imagesTopCopyFromDir builds a list of image references from the images found
// in the source directory.
// It returns an image reference slice with as many elements as the images found
// and any error encountered.
@@ -273,7 +268,7 @@ func imagesToCopyFromDir(dirPath string) ([]types.ImageReference, error) {
return sourceReferences, nil
}
// imagesToCopyFromRegistry builds a list of repository descriptors from the images
// imagesTopCopyFromDir builds a list of repository descriptors from the images
// in a registry configuration.
// It returns a repository descriptors slice with as many elements as the images
// found and any error encountered. Each element of the slice is a list of
@@ -496,7 +491,6 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
if len(args) != 2 {
return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
}
opts.deprecatedTLSVerify.warnIfUsed([]string{"--src-tls-verify", "--dest-tls-verify"})
policyContext, err := opts.global.getPolicyContext()
if err != nil {
@@ -542,14 +536,6 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
return err
}
var manifestType string
if opts.format.present {
manifestType, err = parseManifestFormat(opts.format.value)
if err != nil {
return err
}
}
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
@@ -570,13 +556,11 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
imagesNumber := 0
options := copy.Options{
RemoveSignatures: opts.removeSignatures,
SignBy: opts.signByFingerprint,
ReportWriter: os.Stdout,
DestinationCtx: destinationCtx,
ImageListSelection: imageListSelection,
OptimizeDestinationImageAlreadyExists: true,
ForceManifestMIMEType: manifestType,
RemoveSignatures: opts.removeSignatures,
SignBy: opts.signByFingerprint,
ReportWriter: os.Stdout,
DestinationCtx: destinationCtx,
ImageListSelection: imageListSelection,
}
for _, srcRepo := range srcRepoList {

View File

@@ -2,6 +2,10 @@
package main
func maybeReexec() error {
return nil
}
func reexecIfNecessaryForImages(inputImageNames ...string) error {
return nil
}

View File

@@ -2,19 +2,15 @@ package main
import (
"context"
"fmt"
"io"
"os"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -39,35 +35,6 @@ func commandAction(handler func(args []string, stdout io.Writer) error) func(cmd
}
}
// deprecatedTLSVerifyOption represents a deprecated --tls-verify option,
// which was accepted for all subcommands, for a time.
// Every user should call deprecatedTLSVerifyOption.warnIfUsed() as part of handling the CLI,
// whether or not the value actually ends up being used.
// DO NOT ADD ANY NEW USES OF THIS; just call dockerImageFlags with an appropriate, possibly empty, flagPrefix.
type deprecatedTLSVerifyOption struct {
tlsVerify optionalBool // FIXME FIXME: Warn if this is used, or even if it is ignored.
}
// warnIfUsed warns if tlsVerify was set by the user, and suggests alternatives (which should
// start with "--").
// Every user should call this as part of handling the CLI, whether or not the value actually
// ends up being used.
func (opts *deprecatedTLSVerifyOption) warnIfUsed(alternatives []string) {
if opts.tlsVerify.present {
logrus.Warnf("'--tls-verify' is deprecated, instead use: %s", strings.Join(alternatives, ", "))
}
}
// deprecatedTLSVerifyFlags prepares the CLI flag writing into deprecatedTLSVerifyOption, and the managed deprecatedTLSVerifyOption structure.
// DO NOT ADD ANY NEW USES OF THIS; just call dockerImageFlags with an appropriate, possibly empty, flagPrefix.
func deprecatedTLSVerifyFlags() (pflag.FlagSet, *deprecatedTLSVerifyOption) {
opts := deprecatedTLSVerifyOption{}
fs := pflag.FlagSet{}
flag := optionalBoolFlag(&fs, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the container registry (defaults to true)")
flag.Hidden = true
return fs, &opts
}
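To see how these pieces fit together, here is a hedged wiring sketch for a hypothetical subcommand; the command name, flag prefix, and creds alias below are illustrative, not copied from skopeo's real commands:

```go
// Illustrative subcommand wiring: register the hidden deprecated flag set next
// to the prefixed per-image flags, then warn at run time if the old flag was used.
func exampleCommand(global *globalOptions) *cobra.Command {
	sharedFlags, sharedOpts := sharedImageFlags()
	deprecatedFlags, deprecatedOpt := deprecatedTLSVerifyFlags()
	srcFlags, srcOpts := imageFlags(global, sharedOpts, deprecatedOpt, "src-", "screds")

	cmd := &cobra.Command{
		Use: "example SOURCE-IMAGE",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Warn whether or not the deprecated value actually ends up being used.
			deprecatedOpt.warnIfUsed([]string{"--src-tls-verify"})
			_, err := srcOpts.newSystemContext()
			return err
		},
	}
	cmd.Flags().AddFlagSet(&sharedFlags)
	cmd.Flags().AddFlagSet(&deprecatedFlags)
	cmd.Flags().AddFlagSet(&srcFlags)
	return cmd
}
```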
// sharedImageOptions collects CLI flags which are image-related, but do not change across images.
// This really should be a part of globalOptions, but that would break existing users of (skopeo copy --authfile=).
type sharedImageOptions struct {
@@ -86,15 +53,14 @@ func sharedImageFlags() (pflag.FlagSet, *sharedImageOptions) {
// the same across subcommands, but may be different for each image
// (e.g. may differ between the source and destination of a copy)
type dockerImageOptions struct {
global *globalOptions // May be shared across several imageOptions instances.
shared *sharedImageOptions // May be shared across several imageOptions instances.
deprecatedTLSVerify *deprecatedTLSVerifyOption // May be shared across several imageOptions instances, or nil.
authFilePath optionalString // Path to a */containers/auth.json (prefixed version to override shared image option).
credsOption optionalString // username[:password] for accessing a registry
registryToken optionalString // token to be used directly as a Bearer token when accessing the registry
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
noCreds bool // Access the registry anonymously
global *globalOptions // May be shared across several imageOptions instances.
shared *sharedImageOptions // May be shared across several imageOptions instances.
authFilePath optionalString // Path to a */containers/auth.json (prefixed version to override shared image option).
credsOption optionalString // username[:password] for accessing a registry
registryToken optionalString // token to be used directly as a Bearer token when accessing the registry
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
noCreds bool // Access the registry anonymously
}
// imageOptions collects CLI flags which are the same across subcommands, but may be different for each image
@@ -107,12 +73,11 @@ type imageOptions struct {
// dockerImageFlags prepares a collection of docker-transport specific CLI flags
// writing into imageOptions, and the managed imageOptions structure.
func dockerImageFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageOptions) {
func dockerImageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageOptions) {
flags := imageOptions{
dockerImageOptions: dockerImageOptions{
global: global,
shared: shared,
deprecatedTLSVerify: deprecatedTLSVerify,
global: global,
shared: shared,
},
}
@@ -136,8 +101,8 @@ func dockerImageFlags(global *globalOptions, shared *sharedImageOptions, depreca
}
// imageFlags prepares a collection of CLI flags writing into imageOptions, and the managed imageOptions structure.
func imageFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageOptions) {
dockerFlags, opts := dockerImageFlags(global, shared, deprecatedTLSVerify, flagPrefix, credsOptionAlias)
func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageOptions) {
dockerFlags, opts := dockerImageFlags(global, shared, flagPrefix, credsOptionAlias)
fs := pflag.FlagSet{}
fs.StringVar(&opts.sharedBlobDir, flagPrefix+"shared-blob-dir", "", "`DIRECTORY` to use to share blobs across OCI repositories")
@@ -146,6 +111,10 @@ func imageFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLS
return fs, opts
}
type retryOptions struct {
maxRetry int // The number of times to possibly retry
}
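For context, the options collected by retryFlags below are the kind of value handed to the retry helper in the vendored containers/common package (imported at the top of this file); a hedged sketch, treating the exact helper name and signature as an assumption:

```go
// Sketch of consuming *retry.RetryOptions: wrap a registry operation so that
// transient failures are retried up to the configured number of times.
func pullWithRetry(ctx context.Context, retryOpts *retry.RetryOptions) error {
	return retry.RetryIfNecessary(ctx, func() error {
		// ... perform the registry call that may fail transiently ...
		return nil
	}, retryOpts)
}
```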
func retryFlags() (pflag.FlagSet, *retry.RetryOptions) {
opts := retry.RetryOptions{}
fs := pflag.FlagSet{}
@@ -167,10 +136,6 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
if opts.dockerImageOptions.authFilePath.present {
ctx.AuthFilePath = opts.dockerImageOptions.authFilePath.value
}
if opts.deprecatedTLSVerify != nil && opts.deprecatedTLSVerify.tlsVerify.present {
// If both this deprecated option and a non-deprecated option is present, we use the latter value.
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.deprecatedTLSVerify.tlsVerify.value)
}
if opts.tlsVerify.present {
ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.value
}
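The "we use the latter value" comment above relies purely on ordering: the deprecated flag writes DockerInsecureSkipTLSVerify first, and the prefixed --src-tls-verify/--dest-tls-verify value, handled further down in the same function (outside this hunk), overwrites it, which matches the expectations encoded in TestTLSVerifyFlags later in this diff. A hedged ordering sketch:

```go
// Ordering sketch only; the real newSystemContext sets many more fields.
if opts.deprecatedTLSVerify != nil && opts.deprecatedTLSVerify.tlsVerify.present {
	ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.deprecatedTLSVerify.tlsVerify.value)
}
if opts.tlsVerify.present {
	// The non-deprecated, prefixed flag wins simply because it is applied last.
	ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
}
```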
@@ -201,20 +166,18 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
type imageDestOptions struct {
*imageOptions
dirForceCompression bool // Compress layers when saving to the dir: transport
dirForceDecompression bool // Decompress layers when saving to the dir: transport
ociAcceptUncompressedLayers bool // Whether to accept uncompressed layers in the oci: transport
compressionFormat string // Format to use for the compression
compressionLevel optionalInt // Level to use for the compression
}
// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageDestOptions) {
genericFlags, genericOptions := imageFlags(global, shared, deprecatedTLSVerify, flagPrefix, credsOptionAlias)
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageDestOptions) {
genericFlags, genericOptions := imageFlags(global, shared, flagPrefix, credsOptionAlias)
opts := imageDestOptions{imageOptions: genericOptions}
fs := pflag.FlagSet{}
fs.AddFlagSet(&genericFlags)
fs.BoolVar(&opts.dirForceCompression, flagPrefix+"compress", false, "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
fs.BoolVar(&opts.dirForceDecompression, flagPrefix+"decompress", false, "Decompress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
fs.BoolVar(&opts.ociAcceptUncompressedLayers, flagPrefix+"oci-accept-uncompressed-layers", false, "Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed)")
fs.StringVar(&opts.compressionFormat, flagPrefix+"compress-format", "", "`FORMAT` to use for the compression")
fs.Var(newOptionalIntValue(&opts.compressionLevel), flagPrefix+"compress-level", "`LEVEL` to use for the compression")
@@ -230,7 +193,6 @@ func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
}
ctx.DirForceCompress = opts.dirForceCompression
ctx.DirForceDecompress = opts.dirForceDecompression
ctx.OCIAcceptUncompressedLayers = opts.ociAcceptUncompressedLayers
if opts.compressionFormat != "" {
cf, err := compression.AlgorithmByName(opts.compressionFormat)
@@ -284,21 +246,6 @@ func parseImageSource(ctx context.Context, opts *imageOptions, name string) (typ
return ref.NewImageSource(ctx, sys)
}
// parseManifestFormat parses format parameter for copy and sync command.
// It returns string value to use as manifest MIME type
func parseManifestFormat(manifestFormat string) (string, error) {
switch manifestFormat {
case "oci":
return imgspecv1.MediaTypeImageManifest, nil
case "v2s1":
return manifest.DockerV2Schema1SignedMediaType, nil
case "v2s2":
return manifest.DockerV2Schema2MediaType, nil
default:
return "", fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", manifestFormat)
}
}
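As a small usage illustration, the MIME type returned here is what ends up in copy.Options.ForceManifestMIMEType (see the sync.go hunk earlier in this diff); the helper below is hypothetical and assumes it lives alongside parseManifestFormat in package main:

```go
// copyOptionsForFormat maps a --format value to copy.Options; an empty format
// leaves ForceManifestMIMEType unset so the source manifest type is kept.
func copyOptionsForFormat(format string) (copy.Options, error) {
	var manifestType string
	if format != "" {
		var err error
		manifestType, err = parseManifestFormat(format)
		if err != nil {
			return copy.Options{}, err
		}
	}
	// For --format=oci this yields imgspecv1.MediaTypeImageManifest.
	return copy.Options{ForceManifestMIMEType: manifestType}, nil
}
```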
// usageTemplate returns the usage template for skopeo commands
// This blocks the displaying of the global options. The main skopeo
// command should not use this.


@@ -4,11 +4,8 @@ import (
"os"
"testing"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -18,26 +15,17 @@ func fakeGlobalOptions(t *testing.T, flags []string) (*globalOptions, *cobra.Com
app, opts := createApp()
cmd := &cobra.Command{}
app.AddCommand(cmd)
err := app.ParseFlags(flags)
err := cmd.ParseFlags(flags)
require.NoError(t, err)
return opts, cmd
}
// fakeImageOptions creates imageOptions and sets it according to globalFlags/cmdFlags.
func fakeImageOptions(t *testing.T, flagPrefix string, useDeprecatedTLSVerify bool,
globalFlags []string, cmdFlags []string) *imageOptions {
func fakeImageOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageOptions {
globalOpts, cmd := fakeGlobalOptions(t, globalFlags)
sharedFlags, sharedOpts := sharedImageFlags()
var deprecatedTLSVerifyFlag pflag.FlagSet
var deprecatedTLSVerifyOpt *deprecatedTLSVerifyOption
if useDeprecatedTLSVerify {
deprecatedTLSVerifyFlag, deprecatedTLSVerifyOpt = deprecatedTLSVerifyFlags()
}
imageFlags, imageOpts := imageFlags(globalOpts, sharedOpts, deprecatedTLSVerifyOpt, flagPrefix, "")
imageFlags, imageOpts := imageFlags(globalOpts, sharedOpts, flagPrefix, "")
cmd.Flags().AddFlagSet(&sharedFlags)
if useDeprecatedTLSVerify {
cmd.Flags().AddFlagSet(&deprecatedTLSVerifyFlag)
}
cmd.Flags().AddFlagSet(&imageFlags)
err := cmd.ParseFlags(cmdFlags)
require.NoError(t, err)
@@ -46,15 +34,13 @@ func fakeImageOptions(t *testing.T, flagPrefix string, useDeprecatedTLSVerify bo
func TestImageOptionsNewSystemContext(t *testing.T) {
// Default state
opts := fakeImageOptions(t, "dest-", true, []string{}, []string{})
opts := fakeImageOptions(t, "dest-", []string{}, []string{})
res, err := opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, &types.SystemContext{
DockerRegistryUserAgent: defaultUserAgent,
}, res)
assert.Equal(t, &types.SystemContext{}, res)
// Set everything to non-default values.
opts = fakeImageOptions(t, "dest-", true, []string{
opts = fakeImageOptions(t, "dest-", []string{
"--registries.d", "/srv/registries.d",
"--override-arch", "overridden-arch",
"--override-os", "overridden-os",
@@ -86,33 +72,52 @@ func TestImageOptionsNewSystemContext(t *testing.T) {
DockerDaemonCertPath: "/srv/cert-dir",
DockerDaemonHost: "daemon-host.example.com",
DockerDaemonInsecureSkipTLSVerify: true,
DockerRegistryUserAgent: defaultUserAgent,
BigFilesTemporaryDir: "/srv",
}, res)
// Global/per-command tlsVerify behavior is tested in TestTLSVerifyFlags.
// Global/per-command tlsVerify behavior
for _, c := range []struct {
global, cmd string
expectedDocker types.OptionalBool
expectedDockerDaemon bool
}{
{"", "", types.OptionalBoolUndefined, false},
{"", "false", types.OptionalBoolTrue, true},
{"", "true", types.OptionalBoolFalse, false},
{"false", "", types.OptionalBoolTrue, false},
{"false", "false", types.OptionalBoolTrue, true},
{"false", "true", types.OptionalBoolFalse, false},
{"true", "", types.OptionalBoolFalse, false},
{"true", "false", types.OptionalBoolTrue, true},
{"true", "true", types.OptionalBoolFalse, false},
} {
globalFlags := []string{}
if c.global != "" {
globalFlags = append(globalFlags, "--tls-verify="+c.global)
}
cmdFlags := []string{}
if c.cmd != "" {
cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
}
opts := fakeImageOptions(t, "dest-", globalFlags, cmdFlags)
res, err = opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)
}
// Invalid option values
opts = fakeImageOptions(t, "dest-", true, []string{}, []string{"--dest-creds", ""})
opts = fakeImageOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
_, err = opts.newSystemContext()
assert.Error(t, err)
}
// fakeImageDestOptions creates imageDestOptions and sets it according to globalFlags/cmdFlags.
func fakeImageDestOptions(t *testing.T, flagPrefix string, useDeprecatedTLSVerify bool,
globalFlags []string, cmdFlags []string) *imageDestOptions {
func fakeImageDestOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageDestOptions {
globalOpts, cmd := fakeGlobalOptions(t, globalFlags)
sharedFlags, sharedOpts := sharedImageFlags()
var deprecatedTLSVerifyFlag pflag.FlagSet
var deprecatedTLSVerifyOpt *deprecatedTLSVerifyOption
if useDeprecatedTLSVerify {
deprecatedTLSVerifyFlag, deprecatedTLSVerifyOpt = deprecatedTLSVerifyFlags()
}
imageFlags, imageOpts := imageDestFlags(globalOpts, sharedOpts, deprecatedTLSVerifyOpt, flagPrefix, "")
imageFlags, imageOpts := imageDestFlags(globalOpts, sharedOpts, flagPrefix, "")
cmd.Flags().AddFlagSet(&sharedFlags)
if useDeprecatedTLSVerify {
cmd.Flags().AddFlagSet(&deprecatedTLSVerifyFlag)
}
cmd.Flags().AddFlagSet(&imageFlags)
err := cmd.ParseFlags(cmdFlags)
require.NoError(t, err)
@@ -121,12 +126,10 @@ func fakeImageDestOptions(t *testing.T, flagPrefix string, useDeprecatedTLSVerif
func TestImageDestOptionsNewSystemContext(t *testing.T) {
// Default state
opts := fakeImageDestOptions(t, "dest-", true, []string{}, []string{})
opts := fakeImageDestOptions(t, "dest-", []string{}, []string{})
res, err := opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, &types.SystemContext{
DockerRegistryUserAgent: defaultUserAgent,
}, res)
assert.Equal(t, &types.SystemContext{}, res)
oldXRD, hasXRD := os.LookupEnv("REGISTRY_AUTH_FILE")
defer func() {
@@ -141,18 +144,15 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
os.Setenv("REGISTRY_AUTH_FILE", authFile)
// Explicitly set everything to default, except for when the default is “not present”
opts = fakeImageDestOptions(t, "dest-", true, []string{}, []string{
opts = fakeImageDestOptions(t, "dest-", []string{}, []string{
"--dest-compress=false",
})
res, err = opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, &types.SystemContext{
AuthFilePath: authFile,
DockerRegistryUserAgent: defaultUserAgent,
}, res)
assert.Equal(t, &types.SystemContext{AuthFilePath: authFile}, res)
// Set everything to non-default values.
opts = fakeImageDestOptions(t, "dest-", true, []string{
opts = fakeImageDestOptions(t, "dest-", []string{
"--registries.d", "/srv/registries.d",
"--override-arch", "overridden-arch",
"--override-os", "overridden-os",
@@ -184,141 +184,16 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
DockerDaemonCertPath: "/srv/cert-dir",
DockerDaemonHost: "daemon-host.example.com",
DockerDaemonInsecureSkipTLSVerify: true,
DockerRegistryUserAgent: defaultUserAgent,
DirForceCompress: true,
BigFilesTemporaryDir: "/srv",
}, res)
// Global/per-command tlsVerify behavior is tested in TestTLSVerifyFlags.
// Invalid option values in imageOptions
opts = fakeImageDestOptions(t, "dest-", true, []string{}, []string{"--dest-creds", ""})
opts = fakeImageDestOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
_, err = opts.newSystemContext()
assert.Error(t, err)
}
func TestTLSVerifyFlags(t *testing.T) {
type systemContextOpts interface { // Either *imageOptions or *imageDestOptions
newSystemContext() (*types.SystemContext, error)
}
for _, creator := range []struct {
name string
newOpts func(useDeprecatedTLSVerify bool, globalFlags, cmdFlags []string) systemContextOpts
}{
{
"imageFlags",
func(useDeprecatedTLSVerify bool, globalFlags, cmdFlags []string) systemContextOpts {
return fakeImageOptions(t, "dest-", useDeprecatedTLSVerify, globalFlags, cmdFlags)
},
},
{
"imageDestFlags",
func(useDeprecatedTLSVerify bool, globalFlags, cmdFlags []string) systemContextOpts {
return fakeImageDestOptions(t, "dest-", useDeprecatedTLSVerify, globalFlags, cmdFlags)
},
},
} {
t.Run(creator.name, func(t *testing.T) {
for _, c := range []struct {
global, deprecatedCmd, cmd string
expectedDocker types.OptionalBool
expectedDockerDaemon bool
}{
{"", "", "", types.OptionalBoolUndefined, false},
{"", "", "false", types.OptionalBoolTrue, true},
{"", "", "true", types.OptionalBoolFalse, false},
{"", "false", "", types.OptionalBoolTrue, false},
{"", "false", "false", types.OptionalBoolTrue, true},
{"", "false", "true", types.OptionalBoolFalse, false},
{"", "true", "", types.OptionalBoolFalse, false},
{"", "true", "false", types.OptionalBoolTrue, true},
{"", "true", "true", types.OptionalBoolFalse, false},
{"false", "", "", types.OptionalBoolTrue, false},
{"false", "", "false", types.OptionalBoolTrue, true},
{"false", "", "true", types.OptionalBoolFalse, false},
{"false", "false", "", types.OptionalBoolTrue, false},
{"false", "false", "false", types.OptionalBoolTrue, true},
{"false", "false", "true", types.OptionalBoolFalse, false},
{"false", "true", "", types.OptionalBoolFalse, false},
{"false", "true", "false", types.OptionalBoolTrue, true},
{"false", "true", "true", types.OptionalBoolFalse, false},
{"true", "", "", types.OptionalBoolFalse, false},
{"true", "", "false", types.OptionalBoolTrue, true},
{"true", "", "true", types.OptionalBoolFalse, false},
{"true", "false", "", types.OptionalBoolTrue, false},
{"true", "false", "false", types.OptionalBoolTrue, true},
{"true", "false", "true", types.OptionalBoolFalse, false},
{"true", "true", "", types.OptionalBoolFalse, false},
{"true", "true", "false", types.OptionalBoolTrue, true},
{"true", "true", "true", types.OptionalBoolFalse, false},
} {
globalFlags := []string{}
if c.global != "" {
globalFlags = append(globalFlags, "--tls-verify="+c.global)
}
cmdFlags := []string{}
if c.deprecatedCmd != "" {
cmdFlags = append(cmdFlags, "--tls-verify="+c.deprecatedCmd)
}
if c.cmd != "" {
cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
}
opts := creator.newOpts(true, globalFlags, cmdFlags)
res, err := opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)
if c.deprecatedCmd == "" { // Test also the behavior when deprecatedTLSFlag is not recognized
// Use globalFlags from the previous test
cmdFlags := []string{}
if c.cmd != "" {
cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
}
opts := creator.newOpts(false, globalFlags, cmdFlags)
res, err = opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)
}
}
})
}
}
func TestParseManifestFormat(t *testing.T) {
for _, testCase := range []struct {
formatParam string
expectedManifestType string
expectErr bool
}{
{"oci",
imgspecv1.MediaTypeImageManifest,
false},
{"v2s1",
manifest.DockerV2Schema1SignedMediaType,
false},
{"v2s2",
manifest.DockerV2Schema2MediaType,
false},
{"",
"",
true},
{"badValue",
"",
true},
} {
manifestType, err := parseManifestFormat(testCase.formatParam)
if testCase.expectErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
assert.Equal(t, manifestType, testCase.expectedManifestType)
}
}
// since there is a shared authfile image option and a non-shared (prefixed) one, make sure the override logic
// works correctly.
func TestImageOptionsAuthfileOverride(t *testing.T) {
@@ -353,13 +228,12 @@ func TestImageOptionsAuthfileOverride(t *testing.T) {
}, "/srv/dest-authfile",
},
} {
opts := fakeImageOptions(t, testCase.flagPrefix, false, []string{}, testCase.cmdFlags)
opts := fakeImageOptions(t, testCase.flagPrefix, []string{}, testCase.cmdFlags)
res, err := opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, &types.SystemContext{
AuthFilePath: testCase.expectedAuthfilePath,
DockerRegistryUserAgent: defaultUserAgent,
AuthFilePath: testCase.expectedAuthfilePath,
}, res)
}
}


@@ -56,7 +56,6 @@ _skopeo_copy() {
local boolean_options="
--all
--dest-compress
--dest-decompress
--remove-signatures
--src-no-creds
--dest-no-creds
@@ -71,42 +70,6 @@ _skopeo_copy() {
_complete_ "$options_with_args" "$boolean_options" "$transports"
}
_skopeo_sync() {
local options_with_args="
--authfile
--dest
--dest-authfile
--dest-cert-
--dest-creds
--dest-registry-token string
--format
--retry-times
--sign-by
--src
--src-authfile
--src-cert-dir
--src-creds
--src-registry-token
"
local boolean_options="
--all
--dest-no-creds
--dest-tls-verify
--remove-signatures
--scoped
--src-no-creds
--src-tls-verify
"
local transports
transports="
$(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
"
_complete_ "$options_with_args" "$boolean_options" "$transports"
}
_skopeo_inspect() {
local options_with_args="
--authfile
@@ -297,7 +260,7 @@ _cli_bash_autocomplete() {
local counter=1
while [ $counter -lt "$cword" ]; do
case "${words[$counter]}" in
skopeo|copy|sync|inspect|delete|manifest-digest|standalone-sign|standalone-verify|help|h|list-repository-tags)
skopeo|copy|inspect|delete|manifest-digest|standalone-sign|standalone-verify|help|h|list-repository-tags)
command="${words[$counter]//-/_}"
cpos=$counter
(( cpos++ ))


@@ -1,95 +0,0 @@
#!/bin/bash
# This script is intended to be executed by automation or humans
# under a hack/get_ci_vm.sh context. Use under any other circumstances
# is unlikely to function.
set -e
if [[ -r "/etc/automation_environment" ]]; then
source /etc/automation_environment
source $AUTOMATION_LIB_PATH/common_lib.sh
else
(
echo "WARNING: It does not appear that containers/automation was installed."
echo " Functionality of most of ${BASH_SOURCE[0]} will be negatively"
echo " impacted."
) > /dev/stderr
fi
OS_RELEASE_ID="$(source /etc/os-release; echo $ID)"
# GCE image-name compatible string representation of distribution _major_ version
OS_RELEASE_VER="$(source /etc/os-release; echo $VERSION_ID | tr -d '.')"
# Combined to ease some usage
OS_REL_VER="${OS_RELEASE_ID}-${OS_RELEASE_VER}"
export "PATH=$PATH:$GOPATH/bin"
podmanmake() {
req_env_vars GOPATH SKOPEO_PATH SKOPEO_CI_CONTAINER_FQIN
warn "Accumulated technical-debt requires execution inside a --privileged container. This is very likely hiding bugs!"
showrun podman run -it --rm --privileged \
-e GOPATH=$GOPATH \
-v $GOPATH:$GOPATH:Z \
-w $SKOPEO_PATH \
$SKOPEO_CI_CONTAINER_FQIN \
make "$@"
}
_run_setup() {
if [[ "$OS_RELEASE_ID" == "fedora" ]]; then
# This is required as part of the standard Fedora VM setup
growpart /dev/sda 1
resize2fs /dev/sda1
# VM's come with the distro. skopeo pre-installed
dnf erase -y skopeo
else
die "Unknown/unsupported distro. $OS_REL_VER"
fi
}
_run_vendor() {
podmanmake vendor BUILDTAGS="$BUILDTAGS"
}
_run_build() {
make bin/skopeo BUILDTAGS="$BUILDTAGS"
}
_run_cross() {
podmanmake local-cross BUILDTAGS="$BUILDTAGS"
}
_run_doccheck() {
make validate-docs BUILDTAGS="$BUILDTAGS"
}
_run_unit() {
podmanmake test-unit-local BUILDTAGS="$BUILDTAGS"
}
_run_integration() {
podmanmake test-integration-local BUILDTAGS="$BUILDTAGS"
}
_run_system() {
# Ensure we start with a clean-slate
podman system reset --force
# Executes with containers required for testing.
showrun make test-system-local BUILDTAGS="$BUILDTAGS"
}
req_env_vars SKOPEO_PATH BUILDTAGS
handler="_run_${1}"
if [ "$(type -t $handler)" != "function" ]; then
die "Unknown/Unsupported command-line argument '$1'"
fi
msg "************************************************************"
msg "Runner executing $1 on $OS_REL_VER"
msg "************************************************************"
cd "$SKOPEO_PATH"
$handler


@@ -6,35 +6,22 @@
## Overview
This directory contains the Dockerfiles necessary to create the skopeoimage container
images that are housed on quay.io under the skopeo account. All repositories where
the images live are public and can be pulled without credentials. These container images are secured and the
resulting containers can run safely with privileges within the container.
This directory contains the Dockerfiles necessary to create the three skopeoimage container
images that are housed on quay.io under the skopeo account. All three repositories where
the images live are public and can be pulled without credentials. These container images
are secured and the resulting containers can run safely. The container images are built
using the latest Fedora and then Skopeo is installed into them:
The container images are built using the latest Fedora and then Skopeo is installed into them.
The PATH in the container images is set to the default PATH provided by Fedora. Also, the
ENTRYPOINT and the WORKDIR variables are not set within these container images, as such they
default to `/`.
* quay.io/skopeo/stable - This image is built using the latest stable version of Skopeo in a Fedora based container. Built with skopeoimage/stable/Dockerfile.
* quay.io/skopeo/upstream - This image is built using the latest code found in this GitHub repository. When someone creates a commit and pushes it, the image is created. Due to that the image changes frequently and is not guaranteed to be stable. Built with skopeoimage/upstream/Dockerfile.
* quay.io/skopeo/testing - This image is built using the latest version of Skopeo that is or was in updates testing for Fedora. At times this may be the same as the stable image. This container image will primarily be used by the development teams for verification testing when a new package is created. Built with skopeoimage/testing/Dockerfile.
The container images are:
## Multiarch images
* `quay.io/containers/skopeo:<version>` and `quay.io/skopeo/stable:<version>` -
These images are built when a new Skopeo version becomes available in
Fedora. These images are intended to be unchanging and stable, they will
never be updated by automation once they've been pushed. For build details,
please [see the configuration file](stable/Dockerfile).
* `quay.io/containers/skopeo:latest` and `quay.io/skopeo/stable:latest` -
Built daily using the same Dockerfile as above. The skopeo version
will remain the "latest" available in Fedora, however the image
contents may vary compared to the version-tagged images.
* `quay.io/skopeo/testing:latest` - This image is built daily, using the
latest version of Skopeo that was in the Fedora `updates-testing` repository.
The image is Built with [the testing Dockerfile](testing/Dockerfile).
* `quay.io/skopeo/upstream:latest` - This image is built daily using the latest
code found in this GitHub repository. Due to the image changing frequently,
it's not guaranteed to be stable or even executable. The image is built with
[the upstream Dockerfile](upstream/Dockerfile).
Multiarch images are available for Skopeo upstream and stable versions. Supported architectures are `amd64`, `s390x`, `ppc64le`.
Available images are `quay.io/skopeo/upstream:master`, `quay.io/skopeo/stable:v1.2.0`, `quay.io/containers/skopeo:v1.2.0`.
Images can be used the same way as in a single architecture case, no extra setup is required. For samples see next chapter.
## Sample Usage


@@ -6,7 +6,7 @@
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:33
FROM registry.fedoraproject.org/fedora:32
# Don't include container-selinux and remove
# directories used by yum that are just taking
@@ -27,7 +27,7 @@ RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid
# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json
ENV REGISTRY_AUTH_FILE=/auth.json
# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]


@@ -7,7 +7,7 @@
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:33
FROM registry.fedoraproject.org/fedora:32
# Don't include container-selinux and remove
# directories used by yum that are just taking
@@ -28,7 +28,7 @@ RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid
# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json
ENV REGISTRY_AUTH_FILE=/auth.json
# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]


@@ -6,7 +6,7 @@
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:33
FROM registry.fedoraproject.org/fedora:32
# Don't include container-selinux and remove
# directories used by yum that are just taking
@@ -30,7 +30,7 @@ git clone https://github.com/containers/skopeo /root/skopeo/src/github.com/conta
export GOPATH=/root/skopeo; \
cd /root/skopeo/src/github.com/containers/skopeo; \
make bin/skopeo;\
make PREFIX=/usr install;\
make install;\
rm -rf /root/skopeo/*; \
yum -y remove git golang go-md2man make; \
yum -y clean all; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*
@@ -48,7 +48,7 @@ RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid
# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json
ENV REGISTRY_AUTH_FILE=/auth.json
# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]


@@ -4,7 +4,7 @@
skopeo\-copy - Copy an image (manifest, filesystem layers, signatures) from one location to another.
## SYNOPSIS
**skopeo copy** [*options*] _source-image_ _destination-image_
**skopeo copy** [**--sign-by=**_key-ID_] _source-image destination-image_
## DESCRIPTION
Copy an image (manifest, filesystem layers, signatures) from one location to another.
@@ -20,11 +20,7 @@ automatically inherit any parts of the source name.
## OPTIONS
**--additional-tag**=_strings_
Additional tags (supports docker-archive).
**--all**, **-a**
**--all**
If _source-image_ refers to a list of images, instead of copying just the image which matches the current OS and
architecture (subject to the use of the global --override-os, --override-arch and --override-variant options), attempt to copy all of
@@ -46,123 +42,51 @@ Path of the authentication file for the source registry. Uses path given by `--a
Path of the authentication file for the destination registry. Uses path given by `--authfile`, if not provided.
**--dest-shared-blob-dir** _directory_
**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)
Directory to use to share blobs across OCI repositories.
**--quiet, -q** suppress output information when copying images
**--digestfile** _path_
**--remove-signatures** do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.
After copying the image, write the digest of the resulting image to the file.
**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_
**--encrypt-layer** _ints_
**--encryption-key** _protocol:keyfile_ specifies the encryption protocol, which can be JWE (RFC7516), PGP (RFC4880), and PKCS7 (RFC2315) and the key material required for image encryption. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file.
*Experimental* the 0-indexed layer indices, with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer)
**--decryption-key** _key[:passphrase]_ to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, it is required to be passed in the argument and omitted otherwise.
**--format**, **-f** _manifest-type_
**--src-creds** _username[:password]_ for accessing the source registry.
MANIFEST TYPE (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source, with fallbacks)
**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source).
**--help**, **-h**
**--dest-oci-accept-uncompressed-layers** _bool-value_ Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed).
Print usage statement
**--dest-creds** _username[:password]_ for accessing the destination registry.
**--quiet**, **-q**
**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon.
Suppress output information when copying images.
**--src-no-creds** _bool-value_ Access the registry anonymously.
**--remove-signatures**
**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true).
Do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.
**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon.
**--sign-by**=_key-id_
**--dest-no-creds** _bool-value_ Access the registry anonymously.
Add a signature using that key ID for an image name corresponding to _destination-image_
**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true).
**--src-shared-blob-dir** _directory_
**--src-daemon-host** _host_ Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
Directory to use to share blobs across OCI repositories.
**--encryption-key** _protocol:keyfile_
Specifies the encryption protocol, which can be JWE (RFC7516), PGP (RFC4880), and PKCS7 (RFC2315) and the key material required for image encryption. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file.
**--decryption-key** _key[:passphrase]_
Key to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, it is required to be passed in the argument and omitted otherwise.
**--src-creds** _username[:password]_
Credentials for accessing the source registry.
**--dest-compress** _bool-value_
Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source).
**--dest-decompress** _bool-value_
Decompress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source).
**--dest-oci-accept-uncompressed-layers** _bool-value_
Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed).
**--dest-creds** _username[:password]_
Credentials for accessing the destination registry.
**--src-cert-dir** _path_
Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon.
**--src-no-creds** _bool-value_
Access the registry anonymously.
**--src-tls-verify** _bool-value_
Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true).
**--dest-cert-dir** _path_
Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon.
**--dest-no-creds** _bool-value_
Access the registry anonymously.
**--dest-tls-verify** _bool-value_
Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true).
**--src-daemon-host** _host_
Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
**--dest-daemon-host** _host_
Copy to docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
**--dest-daemon-host** _host_ Copy to docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
Existing signatures, if any, are preserved as well.
**--dest-compress-format** _format_
**--dest-compress-format** _format_ Specifies the compression format to use. Supported values are: `gzip` and `zstd`.
Specifies the compression format to use. Supported values are: `gzip` and `zstd`.
**--dest-compress-level** _format_ Specifies the compression level to use. The value is specific to the compression algorithm used, e.g. for zstd the accepted values are in the range 1-20 (inclusive), while for gzip it is 1-9 (inclusive).
**--dest-compress-level** _format_
**--src-registry-token** _Bearer token_ for accessing the source registry.
Specifies the compression level to use. The value is specific to the compression algorithm used, e.g. for zstd the accepted values are in the range 1-20 (inclusive), while for gzip it is 1-9 (inclusive).
**--src-registry-token** _token_
Bearer token for accessing the source registry.
**--dest-registry-token** _token_
Bearer token for accessing the destination registry.
**--retry-times**
The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
**--dest-registry-token** _Bearer token_ for accessing the destination registry.
## EXAMPLES
@@ -220,8 +144,9 @@ skopeo copy --encryption-key jwe:./public.key --encrypt-layer 1 oci:local_nginx
```
## SEE ALSO
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5), containers-policy.json(5), containers-transports(5), containers-signature(5)
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5), containers-policy.json(5), containers-transports(5)
## AUTHORS
Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>


@@ -4,7 +4,7 @@
skopeo\-delete - Mark the _image-name_ for later deletion by the registry's garbage collector.
## SYNOPSIS
**skopeo delete** [*options*] _image-name_
**skopeo delete** _image-name_
Mark _image-name_ for deletion. To release the allocated disk space, you must login to the container registry server and execute the container registry garbage collector. E.g.,
@@ -19,50 +19,22 @@ $ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distrib
```
## OPTIONS
**--authfile** _path_
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--creds** _username[:password]_
**--creds** _username[:password]_ for accessing the registry.
Credentials for accessing the registry.
**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry.
**--cert-dir** _path_
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).
Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry.
**--daemon-host** _host_
Use docker daemon host at _host_ (`docker-daemon:` transport only)
**--help**, **-h**
Print usage statement
**--no-creds** _bool-value_
Access the registry anonymously.
**--no-creds** _bool-value_ Access the registry anonymously.
Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.
**--registry-token** _token_
Bearer token for accessing the registry.
**--retry-times**
The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
**--shared-blob-dir** _directory_
Directory to use to share blobs across OCI repositories.
**--tls-verify**=_bool_
Require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)
**--registry-token** _Bearer token_ for accessing the registry.
## EXAMPLES
@@ -79,3 +51,4 @@ skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)
## AUTHORS
Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>


@@ -31,19 +31,11 @@ Output configuration in OCI format, default is to format in JSON format.
Username and password for accessing the registry.
**--daemon-host** _host_
Use docker daemon host at _host_ (`docker-daemon:` transport only)
**--format**, **-f**=*format*
Format the output using the given Go template.
The keys of the returned JSON can be used as the values for the --format flag (see examples below).
**--help**, **-h**
Print usage statement
**--no-creds**
Access the registry anonymously.
@@ -61,13 +53,9 @@ Registry token for accessing the registry.
The number of times to retry; retry wait time will be exponentially increased based on the number of failed attempts.
**--shared-blob-dir** _directory_
**--tls-verify**
Directory to use to share blobs across OCI repositories.
**--tls-verify**=_bool_
Require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)
Require HTTPS and verify certificates when talking to container registries (defaults to true).
## EXAMPLES


@@ -1,47 +1,29 @@
% skopeo-list-tags(1)
## NAME
skopeo\-list\-tags - List tags in the transport-specific image repository.
skopeo\-list\-tags - Return a list of tags for the transport-specific image repository.
## SYNOPSIS
**skopeo list-tags** [*options*] _repository-name_
**skopeo list-tags** _repository-name_
Return a list of tags from _repository-name_ in a registry.
_repository-name_ name of repository to retrieve tag listing from
## OPTIONS
**--authfile** _path_
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--creds** _username[:password]_ for accessing the registry.
**--cert-dir** _path_
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.
Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).
**--help**, **-h**
**--no-creds** _bool-value_ Access the registry anonymously.
Print usage statement
**--no-creds** _bool-value_
Access the registry anonymously.
**--registry-token** _Bearer token_
Bearer token for accessing the registry.
**--retry-times**
The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
**--tls-verify**=_bool_
Require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)
**--registry-token** _Bearer token_ for accessing the registry.
## REPOSITORY NAMES
@@ -52,18 +34,18 @@ This commands refers to repositories using a _transport_`:`_details_ format. The
**docker://**_docker-repository-reference_
A repository in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(skopeo login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
A _docker-repository-reference_ is of the form: **registryhost:port/repositoryname** which is similar to an _image-reference_ but with no tag or digest allowed as the last component (e.g no `:latest` or `@sha256:xyz`)
Examples of valid docker-repository-references:
"docker.io/myuser/myrepo"
"docker.io/nginx"
"docker.io/library/fedora"
"localhost:5000/myrepository"
Examples of invalid references:
"docker.io/nginx:latest"
"docker.io/myuser/myimage:v1.0"
"docker.io/myuser/myimage@sha256:f48c4cc192f4c3c6a069cb5cca6d0a9e34d6076ba7c214fd0cc3ca60e0af76bb"
## EXAMPLES
@@ -119,3 +101,4 @@ skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)
## AUTHORS
Zach Hill <zach@anchore.com>


@@ -4,7 +4,7 @@
skopeo\-login - Login to a container registry.
## SYNOPSIS
**skopeo login** [*options*] _registry_
**skopeo login** [*options*] *registry*
## DESCRIPTION
**skopeo login** logs into a specified registry server with the correct username
@@ -43,18 +43,16 @@ Return the logged-in user for the registry. Return error if no login is found.
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
Default certificates directory is _/etc/containers/certs.d_.
**--tls-verify**=*true|false*
Require HTTPS and verify certificates when contacting registries (default: true). If explicitly set to true,
then TLS verification will be used. If set to false, then TLS verification will not be used. If not specified,
TLS verification will be used unless the target registry is listed as an insecure registry in registries.conf.
**--help**, **-h**
Print usage statement
**--tls-verify**=_bool_
Require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)
**--verbose**, **-v**
Write more detailed information to stdout
## EXAMPLES
```


@@ -4,7 +4,7 @@
skopeo\-logout - Logout of a container registry.
## SYNOPSIS
**skopeo logout** [*options*] _registry_
**skopeo logout** [*options*] *registry*
## DESCRIPTION
**skopeo logout** logs out of a specified registry server by deleting the cached credentials
@@ -29,10 +29,6 @@ Remove the cached credentials for all registries in the auth file
Print usage statement
**--tls-verify**=_bool_
Require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)
## EXAMPLES
```


@@ -10,12 +10,6 @@ skopeo\-manifest\-digest - Compute a manifest digest for a manifest-file and wri
Compute a manifest digest of _manifest-file_ and write it to standard output.
## OPTIONS
**--help**, **-h**
Print usage statement
## EXAMPLES
```sh
@@ -29,3 +23,4 @@ skopeo(1)
## AUTHORS
Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>


@@ -4,10 +4,11 @@
skopeo\-standalone-sign - Debugging tool - Publish and sign an image in one step.
## SYNOPSIS
**skopeo standalone-sign** [*options*] _manifest_ _docker-reference_ _key-fingerprint_ **--output**|**-o** _signature_
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_
## DESCRIPTION
This is primarily a debugging tool, useful for special cases, and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.
This is primarily a debugging tool, or useful for special cases,
and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.
_manifest_ Path to a file containing the image manifest
@@ -15,15 +16,7 @@ This is primarily a debugging tool, useful for special cases, and usually should
_key-fingerprint_ Key identity to use for signing
## OPTIONS
**--help**, **-h**
Print usage statement
**--output**, **-o** _output file_
Write signature to _output file_.
**--output**|**-o** output file
## EXAMPLES
@@ -32,13 +25,10 @@ $ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busy
$
```
## NOTES
This command is intended for use with local signatures e.g. OpenPGP ( other signature formats may be added in the future ), as per containers-signature(5). Furthermore, this command does **not** interact with the artifacts generated by Docker Content Trust (DCT). For more information, please see [containers-signature(5)](https://github.com/containers/image/blob/main/docs/containers-signature.5.md).
## SEE ALSO
skopeo(1), skopeo-copy(1), containers-signature(5)
## AUTHORS
Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>


@@ -4,13 +4,11 @@
skopeo\-standalone\-verify - Verify an image signature.
## SYNOPSIS
**skopeo standalone-verify** _manifest_ _docker-reference_ _key-fingerprint_ _signature_
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_
## DESCRIPTION
Verify a signature using local files; the digest will be printed on success. This is primarily a debugging tool, useful for special cases,
and usually should not be a part of your normal operational workflow. Additionally, consider configuring a signature verification policy file,
as per containers-policy.json(5).
Verify a signature using local files, digest will be printed on success.
_manifest_ Path to a file containing the image manifest
@@ -22,12 +20,6 @@ as per containers-policy.json(5).
**Note:** If you do use this, make sure that the image can not be changed at the source location between the times of its verification and use.
## OPTIONS
**--help**, **-h**
Print usage statement
## EXAMPLES
```sh
@@ -35,13 +27,10 @@ $ skopeo standalone-verify busybox-manifest.json registry.example.com/example/bu
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```
## NOTES
This command is intended for use with local signatures e.g. OpenPGP ( other signature formats may be added in the future ), as per containers-signature(5). Furthermore, this command does **not** interact with the artifacts generated by Docker Content Trust (DCT). For more information, please see [containers-signature(5)](https://github.com/containers/image/blob/main/docs/containers-signature.5.md).
## SEE ALSO
skopeo(1), containers-signature(5), containers-policy.json(5)
skopeo(1), containers-signature(5)
## AUTHORS
Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>


@@ -5,7 +5,7 @@ skopeo\-sync - Synchronize images between container registries and local directo
## SYNOPSIS
**skopeo sync** [*options*] --src _transport_ --dest _transport_ _source_ _destination_
**skopeo sync** --src _transport_ --dest _transport_ _source_ _destination_
## DESCRIPTION
Synchronize images between container registries and local directories.
@@ -32,7 +32,7 @@ When the `--scoped` option is specified, images are prefixed with the source ima
name can be stored at _destination_.
## OPTIONS
**--all**, **-a**
**--all**
If one of the images in __src__ refers to a list of images, instead of copying just the image which matches the current OS and
architecture (subject to the use of the global --override-os, --override-arch and --override-variant options), attempt to copy all of
the images in the list, and the list itself.
@@ -50,21 +50,15 @@ Path of the authentication file for the source registry. Uses path given by `--a
Path of the authentication file for the destination registry. Uses path given by `--authfile`, if not provided.
**--src**, **-s** _transport_ Transport for the source repository.
**--src** _transport_ Transport for the source repository.
**--dest**, **-d** _transport_ Destination transport.
**--format**, **-f** _manifest-type_ Manifest Type (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source, with fallbacks).
**--help**, **-h**
Print usage statement.
**--dest** _transport_ Destination transport.
**--scoped** Prefix images with the source image path, so that multiple images with the same name can be stored at _destination_.
**--remove-signatures** Do not copy signatures, if any, from _source-image_. This is necessary when copying a signed image to a destination which does not support signatures.
**--sign-by**=_key-id_ Add a signature using that key ID for an image name corresponding to _destination-image_.
**--sign-by=**_key-id_ Add a signature using that key ID for an image name corresponding to _destination-image_.
**--src-creds** _username[:password]_ for accessing the source registry.
@@ -86,8 +80,6 @@ Print usage statement.
**--dest-registry-token** _Bearer token_ for accessing the destination registry.
**--retry-times** the number of times to retry, retry wait time will be exponentially increased based on the number of failed attempts.
## EXAMPLES
### Synchronizing to a local directory


@@ -51,64 +51,42 @@ See [containers-transports(5)](https://github.com/containers/image/blob/master/d
## OPTIONS
**--command-timeout** _duration_
**--command-timeout** _duration_ Timeout for the command execution.
Timeout for the command execution.
**--debug** enable debug output
**--debug**
**--help**|**-h** Show help
enable debug output
**--insecure-policy** Adopt an insecure, permissive policy that allows anything. This obviates the need for a policy file.
**--help**, **-h**
**--override-arch** _arch_ Use _arch_ instead of the architecture of the machine for choosing images.
Show help
**--override-os** _OS_ Use _OS_ instead of the running OS for choosing images.
**--insecure-policy**
**--override-variant** _VARIANT_ Use _VARIANT_ instead of the running architecture variant for choosing images.
Adopt an insecure, permissive policy that allows anything. This obviates the need for a policy file.
**--policy** _path-to-policy_ Path to a policy.json file to use for verifying signatures and deciding whether an image is trusted, overriding the default trust policy file.
**--override-arch** _arch_
**--registries.d** _dir_ use registry configuration files in _dir_ (e.g. for container signature storage), overriding the default path.
Use _arch_ instead of the architecture of the machine for choosing images.
**--tmpdir** _dir_ used to store temporary files. Defaults to /var/tmp.
**--override-os** _os_
Use _OS_ instead of the running OS for choosing images.
**--override-variant** _variant_
Use _variant_ instead of the running architecture variant for choosing images.
**--policy** _path-to-policy_
Path to a policy.json file to use for verifying signatures and deciding whether an image is trusted, overriding the default trust policy file.
**--registries.d** _dir_
Use registry configuration files in _dir_ (e.g. for container signature storage), overriding the default path.
**--tmpdir** _dir_
Directory used to store temporary files. Defaults to /var/tmp.
**--version**, **-v**
Print the version number
**--version**|**-v** print the version number
## COMMANDS
| Command | Description |
| ----------------------------------------- | ------------------------------------------------------------------------------ |
| [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark the _image-name_ for later deletion by the registry's garbage collector. |
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about _image-name_ in a registry. |
| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List tags in the transport-specific image repository. |
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark image-name for deletion. |
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List the tags for the given transport/repository. |
| [skopeo-login(1)](skopeo-login.1.md) | Login to a container registry. |
| [skopeo-logout(1)](skopeo-logout.1.md) | Logout of a container registry. |
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image signature. |
| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between container registries and local directories. |
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest of manifest-file and write it to standard output.|
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Sign an image. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image. |
| [skopeo-sync(1)](skopeo-sync.1.md)| Copy images from one or more repositories to a user specified destination. |
## FILES
**/etc/containers/policy.json**
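For reference, a minimal permissive policy of the `insecureAcceptAnything` kind also used by this repository's test fixtures could be written as follows; this is an illustrative sketch, not necessarily the file your distribution ships:

```bash
# Install a minimal, fully permissive trust policy (accepts any image).
sudo tee /etc/containers/policy.json >/dev/null <<'EOF'
{
    "default": [
        { "type": "insecureAcceptAnything" }
    ]
}
EOF
```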

23
go.mod
View File

@@ -3,23 +3,28 @@ module github.com/containers/skopeo
go 1.12
require (
github.com/containers/common v0.42.0
github.com/containers/image/v5 v5.15.0
github.com/containers/ocicrypt v1.1.2
github.com/containers/storage v1.33.1
github.com/docker/docker v20.10.7+incompatible
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/containers/common v0.33.4
github.com/containers/image/v5 v5.10.2
github.com/containers/ocicrypt v1.0.3
github.com/containers/storage v1.24.6
github.com/docker/docker v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible
github.com/dsnet/compress v0.0.1 // indirect
github.com/go-check/check v0.0.0-20180628173108-788fd7840127
github.com/gogo/protobuf v1.3.2 // indirect
github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf // indirect
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/image-tools v0.0.0-20170926011501-6d941547fa1d
github.com/opencontainers/runc v1.0.0-rc92 // indirect
github.com/pkg/errors v0.9.1
github.com/russross/blackfriday v2.0.0+incompatible // indirect
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.2.1
github.com/sirupsen/logrus v1.7.0
github.com/spf13/cobra v1.1.1
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.7.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
go4.org v0.0.0-20190218023631-ce4c26f7be8e // indirect
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad // indirect
gopkg.in/yaml.v2 v2.4.0
gotest.tools/v3 v3.0.3 // indirect
)
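The version pins above are normally changed with the standard Go module tooling rather than by hand; a hedged sketch of such a bump (the exact version and the `make vendor` target are assumptions, not taken from this diff):

```bash
# Pin a dependency to a specific release, then refresh go.sum and vendor/.
go get github.com/containers/image/v5@v5.10.2
go mod tidy
make vendor   # assumed repository target wrapping `go mod vendor`
```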

917
go.sum

File diff suppressed because it is too large.

View File

@@ -1,61 +0,0 @@
#!/usr/bin/env bash
#
# For help and usage information, simply execute the script w/o any arguments.
#
# This script is intended to be run by Red Hat skopeo developers who need
# to debug problems specifically related to Cirrus-CI automated testing.
# It requires that you have been granted prior access to create VMs in
# google-cloud. For non-Red Hat contributors, VMs are available as-needed,
# with supervision upon request.
set -e
SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
# Help detect if we were called by get_ci_vm container
GET_CI_VM="${GET_CI_VM:-0}"
in_get_ci_vm() {
if ((GET_CI_VM==0)); then
echo "Error: $1 is not intended for use in this context"
exit 2
fi
}
# get_ci_vm APIv1 container entrypoint calls into this script
# to obtain required repo. specific configuration options.
if [[ "$1" == "--config" ]]; then
in_get_ci_vm "$1"
cat <<EOF
DESTDIR="/var/tmp/go/src/github.com/containers/skopeo"
UPSTREAM_REPO="https://github.com/containers/skopeo.git"
GCLOUD_PROJECT="skopeo"
GCLOUD_IMGPROJECT="libpod-218412"
GCLOUD_CFG="skopeo"
GCLOUD_ZONE="${GCLOUD_ZONE:-us-central1-f}"
GCLOUD_CPUS="2"
GCLOUD_MEMORY="4Gb"
GCLOUD_DISK="200"
EOF
elif [[ "$1" == "--setup" ]]; then
in_get_ci_vm "$1"
# get_ci_vm container entrypoint calls us with this option on the
# Cirrus-CI environment instance, to perform repo.-specific setup.
echo "+ Executing setup" > /dev/stderr
${GOSRC}/${SCRIPT_BASE}/runner.sh setup
else
# Create and access VM for specified Cirrus-CI task
mkdir -p $HOME/.config/gcloud/ssh
podman run -it --rm \
--tz=local \
-e NAME="$USER" \
-e SRCDIR=/src \
-e GCLOUD_ZONE="$GCLOUD_ZONE" \
-e DEBUG="${DEBUG:-0}" \
-v $REPO_DIRPATH:/src:O \
-v $HOME/.config/gcloud:/root/.config/gcloud:z \
-v $HOME/.config/gcloud/ssh:/root/.ssh:z \
quay.io/libpod/get_ci_vm:latest "$@"
fi

View File

@@ -25,8 +25,12 @@ export MAKEDIR="$SCRIPTDIR/make"
# We're a nice, sexy, little shell script, and people might try to run us;
# but really, they shouldn't. We want to be in a container!
# The magic value is defined inside our Dockerfile.
if [[ "$container_magic" != "85531765-346b-4316-bdb8-358e4cca9e5d" ]]; then
inContainer="AssumeSoInitially"
if [ "$PWD" != "/go/src/$SKOPEO_PKG" ]; then
unset inContainer
fi
if [ -z "$inContainer" ]; then
{
echo "# WARNING! I don't seem to be running in a Docker container."
echo "# The result of this command might be an incorrect build, and will not be"
@@ -35,9 +39,6 @@ if [[ "$container_magic" != "85531765-346b-4316-bdb8-358e4cca9e5d" ]]; then
echo "# Try this instead: make all"
echo "#"
} >&2
else
echo "# I appear to be running inside my designated container image, good!"
export SKOPEO_CONTAINER_TESTS=1
fi
echo

View File

@@ -5,7 +5,7 @@ if [ -z "$VALIDATE_UPSTREAM" ]; then
# are running more than one validate bundlescript
VALIDATE_REPO='https://github.com/containers/skopeo.git'
VALIDATE_BRANCH='main'
VALIDATE_BRANCH='master'
if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"

View File

@@ -9,6 +9,6 @@ bundle_test_integration() {
# subshell so that we can export PATH without breaking other things
(
make bin/skopeo ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make PREFIX=/usr install
make install
bundle_test_integration
) 2>&1

View File

@@ -12,7 +12,7 @@ sed -i \
# Build skopeo, install into /usr/bin
make bin/skopeo ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make PREFIX=/usr install
make install
# Run tests
SKOPEO_BINARY=/usr/bin/skopeo bats --tap systemtest

View File

@@ -1,150 +0,0 @@
#!/usr/bin/env bash
#
# man-page-checker - validate and cross-reference man page names
#
# This is the script that cross-checks BETWEEN MAN PAGES. It is not the
# script that cross-checks that each option in skopeo foo --help is listed
# in skopeo-foo.1.md and vice-versa; that one is xref-helpmsgs-manpages.
#
verbose=
for i; do
case "$i" in
-v|--verbose) verbose=verbose ;;
esac
done
die() {
echo "$(basename $0): $*" >&2
exit 1
}
cd $(dirname $0)/../docs || die "Please run me from top-level skopeo dir"
rc=0
# Pass 1: cross-check file names with NAME section
#
# for a given skopeo-foo.1.md, the NAME should be 'skopeo-foo'
for md in *.1.md;do
# Read the first line after '## NAME'
name=$(egrep -A1 '^## NAME' $md|tail -1|awk '{print $1}' | tr -d \\\\)
expect=$(basename $md .1.md)
if [ "$name" != "$expect" ]; then
echo
printf "Inconsistent program NAME in %s:\n" $md
printf " NAME= %s (expected: %s)\n" $name $expect
rc=1
fi
done
# Pass 2: compare descriptions.
#
# Make sure the descriptive text in skopeo-foo.1.md matches the one
# in the table in skopeo.1.md.
for md in $(ls -1 *-*.1.md);do
desc=$(egrep -A1 '^## NAME' $md|tail -1|sed -E -e 's/^skopeo[^[:space:]]+ - //')
# Find the descriptive text in the main skopeo man page.
parent=skopeo.1.md
parent_desc=$(grep $md $parent | awk -F'|' '{print $3}' | sed -E -e 's/^[[:space:]]+//' -e 's/[[:space:]]+$//')
if [ "$desc" != "$parent_desc" ]; then
echo
printf "Inconsistent subcommand descriptions:\n"
printf " %-32s = '%s'\n" $md "$desc"
printf " %-32s = '%s'\n" $parent "$parent_desc"
printf "Please ensure that the NAME section of $md\n"
printf "matches the subcommand description in $parent\n"
rc=1
fi
done
# Helper function: compares man page synopsis vs --help usage message
function compare_usage() {
local cmd="$1"
local from_man="$2"
# Run 'cmd --help', grab the line immediately after 'Usage:'
local help_output=$(../bin/$cmd --help)
local from_help=$(echo "$help_output" | grep -A1 '^Usage:' | tail -1)
# strip off command name from both
from_man=$(sed -E -e "s/\*\*$cmd\*\*[[:space:]]*//" <<<"$from_man")
from_help=$(sed -E -e "s/^[[:space:]]*$cmd[[:space:]]*//" <<<"$from_help")
# man page lists 'foo [*options*]', help msg shows 'foo [command options]'.
# Make sure if one has it, the other does too.
if expr "$from_man" : "\[\*options\*\]" >/dev/null; then
if expr "$from_help" : "\[command options\]" >/dev/null; then
:
else
echo "WARNING: $cmd: man page shows '[*options*]', help does not show [command options]"
rc=1
fi
elif expr "$from_help" : "\[command options\]" >/dev/null; then
echo "WARNING: $cmd: --help shows [command options], man page does not show [*options*]"
rc=1
fi
# Strip off options and flags; start comparing arguments
from_man=$(sed -E -e 's/^\[\*options\*\][[:space:]]*//' <<<"$from_man")
from_help=$(sed -E -e 's/^\[command options\][[:space:]]*//' <<<"$from_help")
# Constant strings in man page are '**foo**', in --help are 'foo'.
from_man=$(sed -E -e 's/\*\*([^*]+)\*\*/\1/g' <<<"$from_man")
# Args in man page are '_foo_', in --help are 'FOO'. Convert all to
# UPCASE simply because it stands out better to the eye.
from_man=$(sed -E -e 's/_([a-z-]+)_/\U\1/g' <<<"$from_man")
# Compare man-page and --help usage strings. Skip 'skopeo' itself,
# because the man page includes '[global options]' which we don't grok.
if [[ "$from_man" != "$from_help" && "$cmd" != "skopeo" ]]; then
printf "%-25s man='%s' help='%s'\n" "$cmd:" "$from_man" "$from_help"
rc=1
fi
}
# Pass 3: compare synopses.
#
# Make sure the SYNOPSIS line in skopeo-foo.1.md reads '**skopeo foo** ...'
for md in *.1.md;do
synopsis=$(egrep -A1 '^#* SYNOPSIS' $md|tail -1)
# Command name must be bracketed by double asterisks; options and
# arguments are bracketed by single ones.
# E.g. '**skopeo copy** [*options*] _..._'
# Get the command name, and confirm that it matches the md file name.
cmd=$(echo "$synopsis" | sed -E -e 's/^\*\*([^*]+)\*\*.*/\1/' | tr -d \*)
# Use sed, not tr, so we only replace the first dash: we want
# skopeo-list-tags -> "skopeo list-tags", not "skopeo list tags"
md_nodash=$(basename "$md" .1.md | sed -e 's/-/ /')
if [ "$cmd" != "$md_nodash" ]; then
echo
printf "Inconsistent program name in SYNOPSIS in %s:\n" $md
printf " SYNOPSIS = %s (expected: '%s')\n" "$cmd" "$md_nodash"
rc=1
fi
# The convention is to use UPPER CASE in 'skopeo foo --help',
# but *lower case bracketed by asterisks* in the man page
if expr "$synopsis" : ".*[A-Z]" >/dev/null; then
echo
printf "Inconsistent capitalization in SYNOPSIS in %s\n" $md
printf " '%s' should not contain upper-case characters\n" "$synopsis"
rc=1
fi
# (for debugging, and getting a sense of standard conventions)
#printf " %-32s ------ '%s'\n" $md "$synopsis"
# If bin/skopeo is available, run "cmd --help" and compare Usage
# messages. This is complicated, so do it in a helper function.
compare_usage "$md_nodash" "$synopsis"
done
exit $rc

17
hack/travis_osx.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
set -e
export GOPATH=$(pwd)/_gopath
export PATH=$GOPATH/bin:$PATH
_containers="${GOPATH}/src/github.com/containers"
mkdir -vp ${_containers}
ln -vsf $(pwd) ${_containers}/skopeo
go version
GO111MODULE=off go get -u github.com/cpuguy83/go-md2man golang.org/x/lint/golint
cd ${_containers}/skopeo
make validate-local test-unit-local bin/skopeo
sudo make install
skopeo -v

View File

@@ -1,277 +0,0 @@
#!/usr/bin/perl
#
# xref-helpmsgs-manpages - cross-reference --help options against man pages
#
package LibPod::CI::XrefHelpmsgsManpages;
use v5.14;
use utf8;
use strict;
use warnings;
(our $ME = $0) =~ s|.*/||;
our $VERSION = '0.1';
# For debugging, show data structures using DumpTree($var)
#use Data::TreeDumper; $Data::TreeDumper::Displayaddress = 0;
# unbuffer output
$| = 1;
###############################################################################
# BEGIN user-customizable section
# Path to skopeo executable
my $Default_Skopeo = './bin/skopeo';
my $SKOPEO = $ENV{SKOPEO} || $Default_Skopeo;
# Path to all doc files (markdown)
my $Docs_Path = 'docs';
# Global error count
my $Errs = 0;
# END user-customizable section
###############################################################################
###############################################################################
# BEGIN boilerplate args checking, usage messages
sub usage {
print <<"END_USAGE";
Usage: $ME [OPTIONS]
$ME recursively runs 'skopeo --help' against
all subcommands; and recursively reads skopeo-*.1.md files
in $Docs_Path, then cross-references that each --help
option is listed in the appropriate man page and vice-versa.
$ME invokes '\$SKOPEO' (default: $Default_Skopeo).
Exit status is zero if no inconsistencies found, one otherwise
OPTIONS:
-v, --verbose show verbose progress indicators
-n, --dry-run make no actual changes
--help display this message
--version display program name and version
END_USAGE
exit;
}
# Command-line options. Note that this operates directly on @ARGV !
our $debug = 0;
our $verbose = 0;
sub handle_opts {
use Getopt::Long;
GetOptions(
'debug!' => \$debug,
'verbose|v' => \$verbose,
help => \&usage,
version => sub { print "$ME version $VERSION\n"; exit 0 },
) or die "Try `$ME --help' for help\n";
}
# END boilerplate args checking, usage messages
###############################################################################
############################## CODE BEGINS HERE ###############################
# The term is "modulino".
__PACKAGE__->main() unless caller();
# Main code.
sub main {
# Note that we operate directly on @ARGV, not on function parameters.
# This is deliberate: it's because Getopt::Long only operates on @ARGV
# and there's no clean way to make it use @_.
handle_opts(); # will set package globals
# Fetch command-line arguments. Barf if too many.
die "$ME: Too many arguments; try $ME --help\n" if @ARGV;
my $help = skopeo_help();
my $man = skopeo_man('skopeo');
xref_by_help($help, $man);
xref_by_man($help, $man);
exit !!$Errs;
}
###############################################################################
# BEGIN cross-referencing
##################
# xref_by_help # Find keys in '--help' but not in man
##################
sub xref_by_help {
my ($help, $man, @subcommand) = @_;
for my $k (sort keys %$help) {
if (exists $man->{$k}) {
if (ref $help->{$k}) {
xref_by_help($help->{$k}, $man->{$k}, @subcommand, $k);
}
# Otherwise, non-ref is leaf node such as a --option
}
else {
my $man = $man->{_path} || 'man';
warn "$ME: skopeo @subcommand --help lists $k, but $k not in $man\n";
++$Errs;
}
}
}
#################
# xref_by_man # Find keys in man pages but not in --help
#################
#
# In an ideal world we could share the functionality in one function; but
# there are just too many special cases in man pages.
#
sub xref_by_man {
my ($help, $man, @subcommand) = @_;
# FIXME: this generates way too much output
for my $k (grep { $_ ne '_path' } sort keys %$man) {
if (exists $help->{$k}) {
if (ref $man->{$k}) {
xref_by_man($help->{$k}, $man->{$k}, @subcommand, $k);
}
}
elsif ($k ne '--help' && $k ne '-h') {
my $man = $man->{_path} || 'man';
warn "$ME: skopeo @subcommand: $k in $man, but not --help\n";
++$Errs;
}
}
}
# END cross-referencing
###############################################################################
# BEGIN data gathering
#################
# skopeo_help # Parse output of 'skopeo [subcommand] --help'
#################
sub skopeo_help {
my %help;
open my $fh, '-|', $SKOPEO, @_, '--help'
or die "$ME: Cannot fork: $!\n";
my $section = '';
while (my $line = <$fh>) {
# Cobra is blessedly consistent in its output:
# Usage: ...
# Available Commands:
# ....
# Options:
# ....
#
# Start by identifying the section we're in...
if ($line =~ /^Available\s+(Commands):/) {
$section = lc $1;
}
elsif ($line =~ /^(Flags):/) {
$section = lc $1;
}
# ...then track commands and options. For subcommands, recurse.
elsif ($section eq 'commands') {
if ($line =~ /^\s{1,4}(\S+)\s/) {
my $subcommand = $1;
print "> skopeo @_ $subcommand\n" if $debug;
$help{$subcommand} = skopeo_help(@_, $subcommand)
unless $subcommand eq 'help'; # 'help' not in man
}
}
elsif ($section eq 'flags') {
# Handle '--foo' or '-f, --foo'
if ($line =~ /^\s{1,10}(--\S+)\s/) {
print "> skopeo @_ $1\n" if $debug;
$help{$1} = 1;
}
elsif ($line =~ /^\s{1,10}(-\S),\s+(--\S+)\s/) {
print "> skopeo @_ $1, $2\n" if $debug;
$help{$1} = $help{$2} = 1;
}
}
}
close $fh
or die "$ME: Error running 'skopeo @_ --help'\n";
return \%help;
}
################
# skopeo_man # Parse contents of skopeo-*.1.md
################
sub skopeo_man {
my $command = shift;
my $manpath = "$Docs_Path/$command.1.md";
print "** $manpath \n" if $debug;
my %man = (_path => $manpath);
open my $fh, '<', $manpath
or die "$ME: Cannot read $manpath: $!\n";
my $section = '';
my @most_recent_flags;
my $previous_subcmd = '';
while (my $line = <$fh>) {
chomp $line;
next unless $line; # skip empty lines
# .md files designate sections with leading double hash
if ($line =~ /^##\s*OPTIONS/) {
$section = 'flags';
}
elsif ($line =~ /^\#\#\s+(SUB)?COMMANDS/) {
$section = 'commands';
}
elsif ($line =~ /^\#\#[^#]/) {
$section = '';
}
# This will be a table containing subcommand names, links to man pages.
elsif ($section eq 'commands') {
# In skopeo.1.md
if ($line =~ /^\|\s*\[skopeo-(\S+?)\(\d\)\]/) {
# $1 will be changed by recursion _*BEFORE*_ left-hand assignment
my $subcmd = $1;
$man{$subcmd} = skopeo_man("skopeo-$1");
}
}
# Options should always be of the form '**-f**' or '**\-\-flag**',
# possibly separated by comma-space.
elsif ($section eq 'flags') {
# If option has long and short form, long must come first.
# This is a while-loop because there may be multiple long
# option names (not in skopeo ATM, but leave the possibility open)
while ($line =~ s/^\*\*(--[a-z0-9.-]+)\*\*(=\*[a-zA-Z0-9-]+\*)?(,\s+)?//g) {
$man{$1} = 1;
}
# Short form
if ($line =~ s/^\*\*(-[a-zA-Z0-9.])\*\*(=\*[a-zA-Z0-9-]+\*)?//g) {
$man{$1} = 1;
}
}
}
close $fh;
return \%man;
}
# END data gathering
###############################################################################
1;

View File

@@ -44,6 +44,14 @@ sudo dnf -y install skopeo
sudo yum -y install skopeo
```
Newer Skopeo releases may be available on the repositories provided by the
Kubic project. Beware, these may not be suitable for production environments.
```sh
sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_7/devel:kubic:libcontainers:stable.repo
sudo yum -y install skopeo
```
### openSUSE
```sh
@@ -79,6 +87,49 @@ sudo apt-get update
sudo apt-get -y install skopeo
```
If you would prefer newer (though not as well-tested) packages,
the [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/skopeo)
provides packages for Debian 10 and newer. The packages in the Kubic project repos are updated
more frequently than those in Debian's official repositories, owing to Debian's release policies.
The build sources for the Kubic packages can be found [here](https://gitlab.com/rhcontainerbot/skopeo/-/tree/debian/debian).
CAUTION: On Debian 11 and newer, including Debian Testing and Sid, we highly recommend you use Buildah, Podman and Skopeo ONLY from EITHER the Kubic repo
OR the official Debian repos. Mixing and matching may lead to unpredictable situations including installation conflicts.
```bash
# Debian 10
echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/Release.key | sudo apt-key add -
sudo apt-get update
sudo apt-get -y install skopeo
# Debian Testing
echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Testing/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Testing/Release.key | sudo apt-key add -
sudo apt-get update
sudo apt-get -y install skopeo
# Debian Sid/Unstable
echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Unstable/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Unstable/Release.key | sudo apt-key add -
sudo apt-get update
sudo apt-get -y install skopeo
```
### Raspberry Pi OS armhf (ex Raspbian)
The Kubic project provides packages for Raspbian 10.
```bash
# Raspbian 10
echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Raspbian_10/ /' | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Raspbian_10/Release.key | sudo apt-key add -
sudo apt-get update -qq
sudo apt-get -qq -y install skopeo
```
### Raspberry Pi OS arm64 (beta)
Raspberry Pi OS uses the standard Debian's repositories,
@@ -99,7 +150,7 @@ sudo apt-get -y install skopeo
If you would prefer newer (though not as well-tested) packages,
the [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/skopeo)
provides packages for active Ubuntu releases 20.04 and newer (it should also work with direct derivatives like Pop!\_OS).
provides packages for active Ubuntu releases 18.04 and newer (it should also work with direct derivatives like Pop!\_OS).
Check out the [Kubic project page](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/skopeo)
for a list of supported Ubuntu version and
architecture combinations. **NOTE:** The command `sudo apt-get -y upgrade`
@@ -146,7 +197,7 @@ sudo dnf install gpgme-devel libassuan-devel btrfs-progs-devel device-mapper-dev
```bash
# Ubuntu (`libbtrfs-dev` requires Ubuntu 18.10 and above):
sudo apt install libgpgme-dev libassuan-dev libbtrfs-dev libdevmapper-dev pkg-config
sudo apt install libgpgme-dev libassuan-dev libbtrfs-dev libdevmapper-dev
```
```bash
@@ -182,11 +233,6 @@ sudo apt-get install go-md2man
sudo dnf install go-md2man
```
```
# MacOS:
brew install go-md2man
```
Then
```bash

View File

@@ -101,7 +101,7 @@ func (s *SkopeoSuite) TestCopyWithLocalAuth(c *check.C) {
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
// copy to private registry using local authentication
imageName := fmt.Sprintf("docker://%s/busybox:mine", s.regV2WithAuth.url)
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", testFQIN+":latest", imageName)
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://docker.io/library/busybox:latest", imageName)
// inspect from private registry
assertSkopeoSucceeds(c, "", "inspect", "--tls-verify=false", imageName)
// logout from the registry

View File

@@ -246,21 +246,6 @@ func (s *CopySuite) TestCopyWithManifestListDigest(c *check.C) {
c.Assert(out, check.Equals, "")
}
func (s *CopySuite) TestCopyWithDigestfileOutput(c *check.C) {
tempdir, err := ioutil.TempDir("", "tempdir")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tempdir)
dir1, err := ioutil.TempDir("", "copy-manifest-list-digest-dir")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir1)
digestOutPath := filepath.Join(tempdir, "digest.txt")
assertSkopeoSucceeds(c, "", "copy", "--digestfile="+digestOutPath, knownListImage, "dir:"+dir1)
readDigest, err := ioutil.ReadFile(digestOutPath)
c.Assert(err, check.IsNil)
_, err = digest.Parse(string(readDigest))
c.Assert(err, check.IsNil)
}
func (s *CopySuite) TestCopyWithManifestListStorageDigest(c *check.C) {
storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest")
c.Assert(err, check.IsNil)
@@ -318,8 +303,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseLi
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
var image2 imgspecv1.Image
err = json.Unmarshal([]byte(i2), &image2)
@@ -354,8 +339,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUses
err = json.Unmarshal([]byte(i2), &image2)
c.Assert(err, check.IsNil)
c.Assert(image2.Architecture, check.Equals, "amd64")
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=arm64", "inspect", "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=arm64", "inspect", "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
i3 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+arm64Instance.String())
var image3 imgspecv1.Image
err = json.Unmarshal([]byte(i3), &image3)
@@ -385,8 +370,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUse
err = json.Unmarshal([]byte(i1), &image1)
c.Assert(err, check.IsNil)
c.Assert(image1.Architecture, check.Equals, "amd64")
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
var image2 imgspecv1.Image
err = json.Unmarshal([]byte(i2), &image2)
@@ -417,7 +402,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUses
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String())
var image1 imgspecv1.Image
err = json.Unmarshal([]byte(i1), &image1)
@@ -452,7 +437,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDig
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage, "containers-storage:"+storage+"test:latest")
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test:latest")
var image1 imgspecv1.Image
err = json.Unmarshal([]byte(i1), &image1)
@@ -506,12 +491,12 @@ func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// "pull": docker: → dir:
assertSkopeoSucceeds(c, "", "copy", testFQIN64, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
// "push": dir: → atomic:
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, "atomic:localhost:5000/myns/unsigned:unsigned")
// The result of pushing and pulling is an equivalent image, except for schema1 embedded names.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:unsigned", "dir:"+dir2)
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "libpod/busybox:amd64", dir2, "myns/unsigned:unsigned")
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "estesp/busybox:amd64", dir2, "myns/unsigned:unsigned")
}
// The most basic (skopeo copy) use:
@@ -568,7 +553,6 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(keysDir)
undecryptedImgDir, err := ioutil.TempDir("", "copy-5")
c.Assert(err, check.IsNil)
defer os.RemoveAll(undecryptedImgDir)
multiLayerImageDir, err := ioutil.TempDir("", "copy-6")
c.Assert(err, check.IsNil)
@@ -603,7 +587,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
"oci:"+encryptedImgDir+":encrypted", "oci:"+decryptedImgDir+":decrypted")
// Copy a standard busybox image locally
assertSkopeoSucceeds(c, "", "copy", testFQIN+":1.30.1", "oci:"+originalImageDir+":latest")
assertSkopeoSucceeds(c, "", "copy", "docker://busybox:1.31.1", "oci:"+originalImageDir+":latest")
// Encrypt the image
assertSkopeoSucceeds(c, "", "copy", "--encryption-key",
@@ -634,7 +618,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
matchLayerBlobBinaryType(c, decryptedImgDir+"/blobs/sha256", "application/x-gzip", 1)
// Copy a standard multi layer nginx image locally
assertSkopeoSucceeds(c, "", "copy", testFQINMultiLayer, "oci:"+multiLayerImageDir+":latest")
assertSkopeoSucceeds(c, "", "copy", "docker://nginx:1.17.8", "oci:"+multiLayerImageDir+":latest")
// Partially encrypt the image
assertSkopeoSucceeds(c, "", "copy", "--encryption-key", "jwe:"+keysDir+"/public.key",
@@ -739,11 +723,11 @@ func (s *CopySuite) TestCopyStreaming(c *check.C) {
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// streaming: docker: → atomic:
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", testFQIN64, "atomic:localhost:5000/myns/unsigned:streaming")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://estesp/busybox:amd64", "atomic:localhost:5000/myns/unsigned:streaming")
// Compare (copies of) the original and the copy:
assertSkopeoSucceeds(c, "", "copy", testFQIN64, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:streaming", "dir:"+dir2)
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "libpod/busybox:amd64", dir2, "myns/unsigned:streaming")
assertSchema1DirImagesAreEqualExceptNames(c, dir1, "estesp/busybox:amd64", dir2, "myns/unsigned:streaming")
// FIXME: Also check pushing to docker://
}
@@ -763,7 +747,7 @@ func (s *CopySuite) TestCopyOCIRoundTrip(c *check.C) {
defer os.RemoveAll(oci2)
// Docker -> OCI
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", testFQIN, "oci:"+oci1+":latest")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://busybox", "oci:"+oci1+":latest")
// OCI -> Docker
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "oci:"+oci1+":latest", ourRegistry+"original/busybox:oci_copy")
// Docker -> OCI
@@ -814,16 +798,16 @@ func (s *CopySuite) TestCopySignatures(c *check.C) {
defer os.Remove(policy)
// type: reject
assertSkopeoFails(c, fmt.Sprintf(".*Source image rejected: Running image %s:latest is rejected by policy.*", testFQIN),
"--policy", policy, "copy", testFQIN+":latest", dirDest)
assertSkopeoFails(c, ".*Source image rejected: Running image docker://busybox:latest is rejected by policy.*",
"--policy", policy, "copy", "docker://busybox:latest", dirDest)
// type: insecureAcceptAnything
assertSkopeoSucceeds(c, "", "--policy", policy, "copy", "docker://quay.io/openshift/origin-hello-openshift", dirDest)
assertSkopeoSucceeds(c, "", "--policy", policy, "copy", "docker://openshift/hello-openshift", dirDest)
// type: signedBy
// Sign the images
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", testFQIN+":1.26", "atomic:localhost:5006/myns/personal:personal")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "official@example.com", testFQIN+":1.26.1", "atomic:localhost:5006/myns/official:official")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "personal@example.com", "docker://busybox:1.26", "atomic:localhost:5006/myns/personal:personal")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--sign-by", "official@example.com", "docker://busybox:1.26.1", "atomic:localhost:5006/myns/official:official")
// Verify that we can pull them
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/personal:personal", dirDest)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "atomic:localhost:5006/myns/official:official", dirDest)
@@ -877,8 +861,8 @@ func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
defer os.Remove(policy)
// Get some images.
assertSkopeoSucceeds(c, "", "copy", testFQIN+":armfh", topDirDest+"/dir1")
assertSkopeoSucceeds(c, "", "copy", testFQIN+":s390x", topDirDest+"/dir2")
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:armfh", topDirDest+"/dir1")
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:s390x", topDirDest+"/dir2")
// Sign the images. By coping from a topDirDest/dirN, also test that non-/restricted paths
// use the dir:"" default of insecureAcceptAnything.
@@ -994,7 +978,7 @@ func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {
c.Assert(err, check.IsNil)
// Get an image to work with. Also verifies that we can use Docker repositories with no sigstore configured.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", testFQIN, ourRegistry+"original/busybox")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "docker://busybox", ourRegistry+"original/busybox")
// Pulling an unsigned image fails.
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"original/busybox", dirDest)
@@ -1048,7 +1032,7 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
defer os.Remove(policy)
// Get an image to work with to an atomic: destination. Also verifies that we can use Docker repositories without X-Registry-Supports-Signatures
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", testFQIN, "atomic:localhost:5000/myns/extension:unsigned")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "docker://busybox", "atomic:localhost:5000/myns/extension:unsigned")
// Pulling an unsigned image using atomic: fails.
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
"--tls-verify=false", "--policy", policy,
@@ -1072,7 +1056,7 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
// Get another image (different so that they don't share signatures, and sign it using docker://)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir,
"copy", "--sign-by", "personal@example.com", testFQIN+":ppc64le", "docker://localhost:5000/myns/extension:extension")
"copy", "--sign-by", "personal@example.com", "docker://estesp/busybox:ppc64le", "docker://localhost:5000/myns/extension:extension")
c.Logf("%s", combinedOutputOfCommand(c, "oc", "get", "istag", "extension:extension", "-o", "json"))
// Pulling the image using atomic: succeeds.
assertSkopeoSucceeds(c, "", "--debug", "--tls-verify=false", "--policy", policy,
@@ -1093,10 +1077,12 @@ func copyWithSignedIdentity(c *check.C, src, dest, signedIdentity, signBy, regis
signingDir := filepath.Join(topDir, "signing-temp")
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", src, "dir:"+signingDir)
c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
// Unknown error in Travis: https://github.com/containers/skopeo/issues/1093
// c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", filepath.Join(signingDir, "signature-1"),
filepath.Join(signingDir, "manifest.json"), signedIdentity, signBy)
c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
// Unknown error in Travis: https://github.com/containers/skopeo/issues/1093
// c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--dest-tls-verify=false", "dir:"+signingDir, dest)
}
@@ -1126,7 +1112,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
// So, make sure to never create a signature that could be considered valid in a different part of the test (i.e. don't reuse tags).
// Get an image to work with.
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", testFQIN, regPrefix+"primary:unsigned")
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://busybox", regPrefix+"primary:unsigned")
// Verify that unsigned images are rejected
assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
"--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:unsigned", dirDest)
@@ -1169,11 +1155,11 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
// To be extra clear about the semantics, verify that the signedPrefix (primary) location never exists
// and only the remapped prefix (mirror) is accessed.
assertSkopeoFails(c, ".*initializing source docker://localhost:5006/myns/mirroring-primary:remapped:.*manifest unknown: manifest unknown.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:remapped", dirDest)
assertSkopeoFails(c, ".*Error initializing source docker://localhost:5006/myns/mirroring-primary:remapped:.*manifest unknown: manifest unknown.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:remapped", dirDest)
}
func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
dir1, err := ioutil.TempDir("", "copy-1")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir1)
@@ -1181,11 +1167,11 @@ func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
}
func (s *SkopeoSuite) TestCopyDestWithAuth(c *check.C) {
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
}
func (s *SkopeoSuite) TestCopySrcAndDestWithAuth(c *check.C) {
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", "--dest-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), fmt.Sprintf("docker://%s/test:auth", s.regV2WithAuth.url))
}
@@ -1215,7 +1201,7 @@ func (s *CopySuite) TestCopyManifestConversion(c *check.C) {
// oci to v2s1 and vice-versa not supported yet
// get v2s2 manifest type
assertSkopeoSucceeds(c, "", "copy", testFQIN, "dir:"+srcDir)
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+srcDir)
verifyManifestMIMEType(c, srcDir, manifest.DockerV2Schema2MediaType)
// convert from v2s2 to oci
assertSkopeoSucceeds(c, "", "copy", "--format=oci", "dir:"+srcDir, "dir:"+destDir1)
@@ -1245,7 +1231,7 @@ func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Regist
// Ensure we are working with a schema2 image.
// dir: accepts any manifest format, i.e. this makes …/input2 a schema2 source which cannot be asked to produce schema1 like ordinary docker: registries can.
assertSkopeoSucceeds(c, "", "copy", testFQIN, "dir:"+input2Dir)
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+input2Dir)
verifyManifestMIMEType(c, input2Dir, manifest.DockerV2Schema2MediaType)
// 2→2 (the "f2t2" in tag means "from 2 to 2")
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "dir:"+input2Dir, schema2Registry+":f2t2")
@@ -1265,6 +1251,14 @@ func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Regist
verifyManifestMIMEType(c, destDir, manifest.DockerV2Schema1SignedMediaType)
}
// Verify manifest in a dir: image at dir is expectedMIMEType.
func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
manifestBlob, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
c.Assert(err, check.IsNil)
mimeType := manifest.GuessMIMEType(manifestBlob)
c.Assert(mimeType, check.Equals, expectedMIMEType)
}
const regConfFixture = "./fixtures/registries.conf"
func (s *SkopeoSuite) TestSuccessCopySrcWithMirror(c *check.C) {

View File

@@ -50,11 +50,6 @@
{
"type": "insecureAcceptAnything"
}
],
"quay.io/openshift": [
{
"type": "insecureAcceptAnything"
}
]
},
"dir": {

View File

@@ -2,7 +2,6 @@ package main
import (
"bufio"
"context"
"encoding/base64"
"fmt"
"io/ioutil"
@@ -10,7 +9,6 @@ import (
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/docker/docker/pkg/homedir"
"github.com/go-check/check"
@@ -64,7 +62,6 @@ func (cluster *openshiftCluster) startMaster(c *check.C) {
cmd := cluster.clusterCmd(nil, "openshift", "start", "master")
cluster.processes = append(cluster.processes, cmd)
stdout, err := cmd.StdoutPipe()
c.Assert(err, check.IsNil)
// Send both to the same pipe. This might cause the two streams to be mixed up,
// but logging actually goes only to stderr - this primarily ensure we log any
// unexpected output to stdout.
@@ -111,8 +108,6 @@ func (cluster *openshiftCluster) startMaster(c *check.C) {
gotPortCheck := false
gotLogCheck := false
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
for !gotPortCheck || !gotLogCheck {
c.Logf("Waiting for master")
select {
@@ -125,8 +120,6 @@ func (cluster *openshiftCluster) startMaster(c *check.C) {
c.Fatal("log check done, success message not found")
}
gotLogCheck = true
case <-ctx.Done():
c.Fatalf("Timed out waiting for master: %v", ctx.Err())
}
}
c.Logf("OK, master started!")
@@ -172,14 +165,8 @@ func (cluster *openshiftCluster) startRegistryProcess(c *check.C, port int, conf
terminatePortCheck <- true
}()
c.Logf("Waiting for registry to start")
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
select {
case <-portOpen:
c.Logf("OK, Registry port open")
case <-ctx.Done():
c.Fatalf("Timed out waiting for registry to start: %v", ctx.Err())
}
<-portOpen
c.Logf("OK, Registry port open")
return cmd
}

View File

@@ -19,8 +19,8 @@ to start a container, then within the container:
SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'
An example of what can be done within the container:
cd ..; make bin/skopeo PREFIX=/usr install
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://quay.io/libpod/busybox:latest atomic:localhost:5000/myns/personal:personal
cd ..; make bin/skopeo install
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://busybox:latest atomic:localhost:5000/myns/personal:personal
oc get istag personal:personal -o json
curl -L -v 'http://localhost:5000/v2/'
cat ~/.docker/config.json

View File

@@ -12,10 +12,8 @@ import (
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/go-check/check"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
@@ -474,26 +472,6 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
}
func (s *SyncSuite) TestSyncManifestOutput(c *check.C) {
tmpDir, err := ioutil.TempDir("", "sync-manifest-output")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
destDir1 := filepath.Join(tmpDir, "dest1")
destDir2 := filepath.Join(tmpDir, "dest2")
destDir3 := filepath.Join(tmpDir, "dest3")
//Split image:tag path from image URI for manifest comparison
imageDir := pullableTaggedImage[strings.LastIndex(pullableTaggedImage, "/")+1:]
assertSkopeoSucceeds(c, "", "sync", "--format=oci", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir1)
verifyManifestMIMEType(c, filepath.Join(destDir1, imageDir), imgspecv1.MediaTypeImageManifest)
assertSkopeoSucceeds(c, "", "sync", "--format=v2s2", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir2)
verifyManifestMIMEType(c, filepath.Join(destDir2, imageDir), manifest.DockerV2Schema2MediaType)
assertSkopeoSucceeds(c, "", "sync", "--format=v2s1", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir3)
verifyManifestMIMEType(c, filepath.Join(destDir3, imageDir), manifest.DockerV2Schema1SignedMediaType)
}
func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
const localRegURL = "docker://" + v2DockerRegistryURL + "/"
@@ -635,7 +613,7 @@ func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) {
"sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", repo, tmpDir)
//tagged
assertSkopeoFails(c, ".*reading manifest.*",
assertSkopeoFails(c, ".*Error reading manifest.*",
"sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", repo+":thetag", tmpDir)
}

View File

@@ -10,17 +10,12 @@ import (
"strings"
"time"
"github.com/containers/image/v5/manifest"
"github.com/go-check/check"
)
const skopeoBinary = "skopeo"
const decompressDirsBinary = "./decompress-dirs.sh"
const testFQIN = "docker://quay.io/libpod/busybox" // tag left off on purpose, some tests need to add a special one
const testFQIN64 = "docker://quay.io/libpod/busybox:amd64"
const testFQINMultiLayer = "docker://quay.io/libpod/alpine_nginx:master" // multi-layer
// consumeAndLogOutputStream takes (f, err) from an exec.*Pipe(), and causes all output to it to be logged to c.
func consumeAndLogOutputStream(c *check.C, id string, f io.ReadCloser, err error) {
c.Assert(err, check.IsNil)
@@ -205,11 +200,3 @@ func runDecompressDirs(c *check.C, regexp string, args ...string) {
c.Assert(string(out), check.Matches, "(?s)"+regexp) // (?s) : '.' will also match newlines
}
}
// Verify manifest in a dir: image at dir is expectedMIMEType.
func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
manifestBlob, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
c.Assert(err, check.IsNil)
mimeType := manifest.GuessMIMEType(manifestBlob)
c.Assert(mimeType, check.Equals, expectedMIMEType)
}

View File

@@ -1,85 +0,0 @@
let
pkgs = (import ./nixpkgs.nix {
crossSystem = {
config = "aarch64-unknown-linux-gnu";
};
config = {
packageOverrides = pkg: {
gpgme = (static pkg.gpgme);
libassuan = (static pkg.libassuan);
libgpgerror = (static pkg.libgpgerror);
libseccomp = (static pkg.libseccomp);
glib = (static pkg.glib).overrideAttrs (x: {
outputs = [ "bin" "out" "dev" ];
mesonFlags = [
"-Ddefault_library=static"
"-Ddevbindir=${placeholder ''dev''}/bin"
"-Dgtk_doc=false"
"-Dnls=disabled"
];
postInstall = ''
moveToOutput "share/glib-2.0" "$dev"
substituteInPlace "$dev/bin/gdbus-codegen" --replace "$out" "$dev"
sed -i "$dev/bin/glib-gettextize" -e "s|^gettext_dir=.*|gettext_dir=$dev/share/glib-2.0/gettext|"
sed '1i#line 1 "${x.pname}-${x.version}/include/glib-2.0/gobject/gobjectnotifyqueue.c"' \
-i "$dev"/include/glib-2.0/gobject/gobjectnotifyqueue.c
'';
});
pcsclite = (static pkg.pcsclite).overrideAttrs (x: {
configureFlags = [
"--enable-confdir=/etc"
"--enable-usbdropdir=/var/lib/pcsc/drivers"
"--disable-libsystemd"
"--disable-libudev"
"--disable-libusb"
];
buildInputs = [ pkgs.python3 pkgs.dbus ];
});
systemd = (static pkg.systemd).overrideAttrs (x: {
outputs = [ "out" "dev" ];
mesonFlags = x.mesonFlags ++ [
"-Dglib=false"
"-Dstatic-libsystemd=true"
];
});
};
};
});
static = pkg: pkg.overrideAttrs (x: {
doCheck = false;
configureFlags = (x.configureFlags or [ ]) ++ [
"--without-shared"
"--disable-shared"
];
dontDisableStatic = true;
enableSharedExecutables = false;
enableStatic = true;
});
self = with pkgs; buildGoModule rec {
name = "skopeo";
src = ./..;
vendorSha256 = null;
doCheck = false;
enableParallelBuilding = true;
outputs = [ "out" ];
nativeBuildInputs = [ bash gitMinimal go-md2man pkg-config which ];
buildInputs = [ glibc glibc.static glib gpgme libassuan libgpgerror libseccomp ];
prePatch = ''
export CFLAGS='-static -pthread'
export LDFLAGS='-s -w -static-libgcc -static'
export EXTRA_LDFLAGS='-s -w -linkmode external -extldflags "-static -lm"'
export BUILDTAGS='static netgo osusergo exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
export CGO_ENABLED=1
'';
buildPhase = ''
patchShebangs .
make bin/skopeo
'';
installPhase = ''
install -Dm755 bin/skopeo $out/bin/skopeo
'';
};
in
self

View File

@@ -7,7 +7,7 @@ let
libassuan = (static pkg.libassuan);
libgpgerror = (static pkg.libgpgerror);
libseccomp = (static pkg.libseccomp);
glib = (static pkg.glib).overrideAttrs (x: {
glib = (static pkg.glib).overrideAttrs(x: {
outputs = [ "bin" "out" "dev" ];
mesonFlags = [
"-Ddefault_library=static"
@@ -15,38 +15,14 @@ let
"-Dgtk_doc=false"
"-Dnls=disabled"
];
postInstall = ''
moveToOutput "share/glib-2.0" "$dev"
substituteInPlace "$dev/bin/gdbus-codegen" --replace "$out" "$dev"
sed -i "$dev/bin/glib-gettextize" -e "s|^gettext_dir=.*|gettext_dir=$dev/share/glib-2.0/gettext|"
sed '1i#line 1 "${x.pname}-${x.version}/include/glib-2.0/gobject/gobjectnotifyqueue.c"' \
-i "$dev"/include/glib-2.0/gobject/gobjectnotifyqueue.c
'';
});
pcsclite = (static pkg.pcsclite).overrideAttrs (x: {
configureFlags = [
"--enable-confdir=/etc"
"--enable-usbdropdir=/var/lib/pcsc/drivers"
"--disable-libsystemd"
"--disable-libudev"
"--disable-libusb"
];
buildInputs = [ pkgs.python3 pkgs.dbus ];
});
systemd = (static pkg.systemd).overrideAttrs (x: {
outputs = [ "out" "dev" ];
mesonFlags = x.mesonFlags ++ [
"-Dglib=false"
"-Dstatic-libsystemd=true"
];
});
};
};
});
static = pkg: pkg.overrideAttrs (x: {
static = pkg: pkg.overrideAttrs(x: {
doCheck = false;
configureFlags = (x.configureFlags or [ ]) ++ [
configureFlags = (x.configureFlags or []) ++ [
"--without-shared"
"--disable-shared"
];
@@ -62,14 +38,13 @@ let
doCheck = false;
enableParallelBuilding = true;
outputs = [ "out" ];
nativeBuildInputs = [ bash gitMinimal go-md2man pkg-config which ];
buildInputs = [ glibc glibc.static glib gpgme libassuan libgpgerror libseccomp ];
nativeBuildInputs = [ bash gitMinimal go-md2man installShellFiles makeWrapper pkg-config which ];
buildInputs = [ glibc glibc.static gpgme libassuan libgpgerror libseccomp ];
prePatch = ''
export CFLAGS='-static -pthread'
export LDFLAGS='-s -w -static-libgcc -static'
export EXTRA_LDFLAGS='-s -w -linkmode external -extldflags "-static -lm"'
export BUILDTAGS='static netgo osusergo exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
export CGO_ENABLED=1
'';
buildPhase = ''
patchShebangs .
@@ -79,5 +54,4 @@ let
install -Dm755 bin/skopeo $out/bin/skopeo
'';
};
in
self
in self

View File

@@ -1,9 +1,9 @@
{
"url": "https://github.com/nixos/nixpkgs",
"rev": "2a96414d7e350160a33ed0978449c9ff5b5a6eb3",
"date": "2021-07-13T18:21:47+02:00",
"path": "/nix/store/2ai9q8ac6vxb2rrngdz82y8jxnk15cvm-nixpkgs",
"sha256": "1dzrfqdjq3yq5jjskiqflzy58l2xx6059gay9p1k07zrlm1wigy5",
"rev": "4a75203f0270f96cbc87f5dfa5d5185690237d87",
"date": "2020-12-29T03:18:48+01:00",
"path": "/nix/store/scswsm6r4jnhp9ki0f6s81kpj5x6jkn7-nixpkgs",
"sha256": "0h70fm9aa7s06wkalbadw70z5rscbs3p6nblb47z523nhlzgjxk9",
"fetchSubmodules": false,
"deepClone": false,
"leaveDotGit": false

View File

@@ -5,5 +5,4 @@ let
url = "${json.url}/archive/${json.rev}.tar.gz";
inherit (json) sha256;
});
in
nixpkgs
in nixpkgs

51
release/Makefile Normal file
View File

@@ -0,0 +1,51 @@
## This Makefile is used to publish skopeo container images with Travis CI ##
## Environment variables, used in this Makefile are specified in .travis.yml
export DOCKER_CLI_EXPERIMENTAL=enabled
GOARCH ?= $(shell go env GOARCH)
# Dereference variable $(1), return value if non-empty, otherwise raise an error.
err_if_empty = $(if $(strip $($(1))),$(strip $($(1))),$(error Required $(1) variable is undefined or empty))
# Requires two arguments: Names of the username and the password env. vars.
define quay_login
@echo "$(call err_if_empty,$(2))" | \
docker login quay.io -u "$(call err_if_empty,$(1))" --password-stdin
endef
# Build container image of skopeo upstream based on host architecture
build-image/upstream:
docker build -t "${UPSTREAM_IMAGE}-${GOARCH}" contrib/skopeoimage/upstream
# Build container image of skopeo stable based on host architecture
build-image/stable:
docker build -t "${STABLE_IMAGE}-${GOARCH}" contrib/skopeoimage/stable
# Push container image of skopeo upstream (based on host architecture) to image repository
push-image/upstream:
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
docker push "${UPSTREAM_IMAGE}-${GOARCH}"
# Push container image of skopeo stable (based on host architecture) to image default and extra repositories
push-image/stable:
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
docker push "${STABLE_IMAGE}-${GOARCH}"
docker tag "${STABLE_IMAGE}-${GOARCH}" "${EXTRA_STABLE_IMAGE}-${GOARCH}"
$(call quay_login,CONTAINERS_QUAY_USERNAME,CONTAINERS_QUAY_PASSWORD)
docker push "${EXTRA_STABLE_IMAGE}-${GOARCH}"
# Create and push multiarch image manifest of skopeo upstream
push-manifest-multiarch/upstream:
docker manifest create "${UPSTREAM_IMAGE}" $(foreach arch,${MULTIARCH_MANIFEST_ARCHITECTURES}, ${UPSTREAM_IMAGE}-${arch})
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
docker manifest push --purge "${UPSTREAM_IMAGE}"
# Create and push multiarch image manifest of skopeo stable
push-manifest-multiarch/stable:
docker manifest create "${STABLE_IMAGE}" $(foreach arch,${MULTIARCH_MANIFEST_ARCHITECTURES}, ${STABLE_IMAGE}-${arch})
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
docker manifest push --purge "${STABLE_IMAGE}"
# Push to extra repository
docker manifest create "${EXTRA_STABLE_IMAGE}" $(foreach arch,${MULTIARCH_MANIFEST_ARCHITECTURES}, ${EXTRA_STABLE_IMAGE}-${arch})
$(call quay_login,CONTAINERS_QUAY_USERNAME,CONTAINERS_QUAY_PASSWORD)
docker manifest push --purge "${EXTRA_STABLE_IMAGE}"

40
release/README.md Normal file
View File

@@ -0,0 +1,40 @@
# skopeo container image build with Travis
This document describes the details and requirements for building and publishing skopeo container images. For both the upstream and stable versions, several architecture-specific images are published, with a multiarch image on top of them.
The Travis configuration is available at `.travis.yml`.
The code to build and publish images is available at `release/Makefile` and should be used only via Travis.
The Travis workflow has 3 major pieces:
- `local-build` - builds and tests the source code locally in osx and linux/amd64 environments; the 2 jobs run in parallel
- `image-build-push` - builds and pushes container images, with several Travis jobs running in parallel to build images for several architectures (linux/amd64, linux/s390x, linux/ppc64le). The build part runs for each PR; the push part runs only for cron jobs or master branch updates.
- `manifest-multiarch-push` - creates and pushes the image manifests, which consist of the architecture-specific images from the previous step. Runs only for cron jobs or master branch updates.
## Ways to trigger a full workflow run
- [cron job](https://docs.travis-ci.com/user/cron-jobs/#adding-cron-jobs)
- Trigger build from Travis CI
- Update code in master branch
## Environment variables
Several environment variables are used to customize image names and to hold the private credentials needed to push to the quay.io repositories.
**Image tags** are specified in environment variables and should be updated manually for each new release.
- `SKOPEO_QUAY_USERNAME` and `SKOPEO_QUAY_PASSWORD` are the credentials used to push images to the `quay.io/skopeo/stable` and `quay.io/skopeo/upstream` repos; they must have write permissions. These variables should be specified in [Travis](https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings).
- `CONTAINERS_QUAY_USERNAME` and `CONTAINERS_QUAY_PASSWORD` are the credentials used to push images to the `quay.io/containers/skopeo` repo; they must have write permissions. These variables should be specified in [Travis](https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings).
Variables in `.travis.yml`:
- `MULTIARCH_MANIFEST_ARCHITECTURES` is a list of architecture short names that appear in the final multiarch manifest. The values must match the architectures used in the `image-build-push` Travis step.
- `STABLE_IMAGE` and `EXTRA_STABLE_IMAGE` are the image names used to publish stable Skopeo.
- `UPSTREAM_IMAGE` is the image name used to publish upstream Skopeo.
### Values for environment variables
| Env variable | Value |
| -------------------------------- |----------------------------------|
| MULTIARCH_MANIFEST_ARCHITECTURES | "amd64 s390x ppc64le" |
| STABLE_IMAGE | quay.io/skopeo/stable:v1.2.0 |
| EXTRA_STABLE_IMAGE | quay.io/containers/skopeo:v1.2.0 |
| UPSTREAM_IMAGE | quay.io/skopeo/upstream:master |
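Taken together, the Makefile targets added above are meant to be run once per architecture and then joined by a manifest push. A minimal sketch of how a Travis job might chain them, assuming the environment variables from the table (and the quay.io credentials) are already exported; the exact invocation is an illustration, not copied from `.travis.yml`:

```bash
# Sketch of one stable-image job followed by the manifest step (assumes env vars above are set).
export GOARCH=amd64                              # each Travis job builds on its own architecture
make -f release/Makefile build-image/stable      # docker build -> ${STABLE_IMAGE}-amd64
make -f release/Makefile push-image/stable       # push to quay.io/skopeo and quay.io/containers
# After all per-architecture jobs have pushed their images:
make -f release/Makefile push-manifest-multiarch/stable
```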

View File

@@ -93,7 +93,7 @@ END_EXPECT
# By default, 'inspect' tries to match our host os+arch. This should fail.
run_skopeo 1 inspect $img
expect_output --substring "parsing manifest for image: choosing image instance: no image found in manifest list for architecture $arch, variant " \
expect_output --substring "Error parsing manifest for image: Error choosing image instance: no image found in manifest list for architecture $arch, variant " \
"skopeo inspect, without --raw, fails"
# With --raw, we can inspect
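For context on the test above: without `--raw`, `skopeo inspect` resolves a manifest list to the host's os/architecture and fails when no matching instance exists, while `--raw` returns the manifest (or manifest list) exactly as stored. Illustrative commands only, using a hypothetical image reference rather than the test's `$img`:

```bash
# For an image whose manifest list has no entry matching the host os/arch:
skopeo inspect docker://localhost:5000/some/manifest-list:tag        # fails to choose an instance
skopeo inspect --raw docker://localhost:5000/some/manifest-list:tag  # prints the raw manifest list
```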

View File

@@ -45,7 +45,7 @@ function setup() {
}
# Compression zstd
@test "copy: oci, zstd" {
@test "copy: oci, round trip, zstd" {
local remote_image=docker://quay.io/libpod/busybox:latest
local dir=$TESTDIR/dir
@@ -57,12 +57,6 @@ function setup() {
# Check there is at least one file that has the zstd magic number as the first 4 bytes
(for i in $dir/blobs/sha256/*; do test "$(head -c 4 $i)" = $magic && exit 0; done; exit 1)
# Check that the manifest's description of the image's first layer is the zstd layer type
instance=$(jq -r '.manifests[0].digest' $dir/index.json)
[[ "$instance" != null ]]
mediatype=$(jq -r '.layers[0].mediaType' < $dir/blobs/${instance/://})
[[ "$mediatype" == "application/vnd.oci.image.layer.v1.tar+zstd" ]]
}
# Same image, extracted once with :tag and once without
@@ -100,50 +94,6 @@ function setup() {
docker://localhost:5000/foo
}
# manifest format
@test "copy: manifest format" {
local remote_image=docker://quay.io/libpod/busybox:latest
local dir1=$TESTDIR/dir1
local dir2=$TESTDIR/dir2
run_skopeo copy --format v2s2 $remote_image dir:$dir1
run_skopeo copy --format oci $remote_image dir:$dir2
grep 'application/vnd.docker.distribution.manifest.v2' $dir1/manifest.json
grep 'application/vnd.oci.image' $dir2/manifest.json
}
# additional tag
@test "copy: additional tag" {
local remote_image=docker://quay.io/libpod/busybox:latest
# additional-tag is supported only for docker-archive
run_skopeo copy --additional-tag busybox:mine $remote_image \
docker-archive:$TESTDIR/mybusybox.tar:busybox:latest
mkdir -p $TESTDIR/podmanroot
run podman --root $TESTDIR/podmanroot load -i $TESTDIR/mybusybox.tar
run podman --root $TESTDIR/podmanroot images
expect_output --substring "mine"
}
# shared blob directory
@test "copy: shared blob directory" {
local remote_image=docker://quay.io/libpod/busybox:latest
local shareddir=$TESTDIR/shareddir
local dir1=$TESTDIR/dir1
local dir2=$TESTDIR/dir2
run_skopeo copy --dest-shared-blob-dir $shareddir \
$remote_image oci:$dir1
[ -n "$(ls $shareddir)" ]
[ -z "$(ls $dir1/blobs)" ]
run_skopeo copy --src-shared-blob-dir $shareddir \
oci:$dir1 oci:$dir2
diff -urN $shareddir $dir2/blobs
}
teardown() {
podman rm -f reg

View File

@@ -8,7 +8,7 @@ load helpers
function setup() {
standard_setup
start_registry --with-cert --enable-delete=true reg
start_registry --with-cert reg
}
@test "local registry, with cert" {
@@ -21,15 +21,6 @@ function setup() {
run_skopeo copy --src-cert-dir=$TESTDIR/client-auth \
docker://localhost:5000/busybox:unsigned \
dir:$TESTDIR/extracted
# inspect with cert
run_skopeo inspect --cert-dir=$TESTDIR/client-auth \
docker://localhost:5000/busybox:unsigned
expect_output --substring "localhost:5000/busybox"
# delete with cert
run_skopeo delete --cert-dir=$TESTDIR/client-auth \
docker://localhost:5000/busybox:unsigned
}
teardown() {

View File

@@ -18,7 +18,7 @@ function setup() {
testuser=testuser
testpassword=$(random_string 15)
start_registry --testuser=$testuser --testpassword=$testpassword --enable-delete=true reg
start_registry --testuser=$testuser --testpassword=$testpassword reg
}
@test "auth: credentials on command line" {
@@ -67,47 +67,6 @@ function setup() {
expect_output --substring "unauthorized: authentication required"
}
@test "auth: copy with --src-creds and --dest-creds" {
run_skopeo copy --dest-tls-verify=false --dest-creds=$testuser:$testpassword \
docker://quay.io/libpod/busybox:latest \
docker://localhost:5000/busybox:mine
run_skopeo copy --src-tls-verify=false --src-creds=$testuser:$testpassword \
docker://localhost:5000/busybox:mine \
dir:$TESTDIR/dir1
run ls $TESTDIR/dir1
expect_output --substring "manifest.json"
}
@test "auth: credentials via authfile" {
podman login --tls-verify=false --authfile $TESTDIR/test.auth -u $testuser -p $testpassword localhost:5000
# copy without authfile: should fail
run_skopeo 1 copy --dest-tls-verify=false \
docker://quay.io/libpod/busybox:latest \
docker://localhost:5000/busybox:mine
# copy with authfile: should work
run_skopeo copy --dest-tls-verify=false \
--authfile $TESTDIR/test.auth \
docker://quay.io/libpod/busybox:latest \
docker://localhost:5000/busybox:mine
# inspect without authfile: should fail
run_skopeo 1 inspect --tls-verify=false docker://localhost:5000/busybox:mine
expect_output --substring "unauthorized: authentication required"
# inspect with authfile: should work
run_skopeo inspect --tls-verify=false --authfile $TESTDIR/test.auth docker://localhost:5000/busybox:mine
expect_output --substring "localhost:5000/busybox"
# delete without authfile: should fail
run_skopeo 1 delete --tls-verify=false docker://localhost:5000/busybox:mine
expect_output --substring "authentication required"
# delete with authfile: should work
run_skopeo delete --tls-verify=false --authfile $TESTDIR/test.auth docker://localhost:5000/busybox:mine
}
teardown() {
podman rm -f reg

View File

@@ -143,75 +143,6 @@ END_PUSH
END_TESTS
}
@test "signing: remove signature" {
run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null
if [[ "$output" =~ 'signing is not supported' ]]; then
skip "skopeo built without support for creating signatures"
return 1
fi
if [ "$status" -ne 0 ]; then
die "exit code is $status; expected 0"
fi
# Cache local copy
run_skopeo copy docker://quay.io/libpod/busybox:latest \
dir:$TESTDIR/busybox
# Push a signed image
run_skopeo --registries.d $REGISTRIES_D \
copy --dest-tls-verify=false \
--sign-by=alice@test.redhat.com \
dir:$TESTDIR/busybox \
docker://localhost:5000/myns/alice:signed
# Fetch the image with signature
run_skopeo --registries.d $REGISTRIES_D \
--policy $POLICY_JSON \
copy --src-tls-verify=false \
docker://localhost:5000/myns/alice:signed \
dir:$TESTDIR/busybox-signed
# Fetch the image with removing signature
run_skopeo --registries.d $REGISTRIES_D \
--policy $POLICY_JSON \
copy --src-tls-verify=false \
--remove-signatures \
docker://localhost:5000/myns/alice:signed \
dir:$TESTDIR/busybox-unsigned
ls $TESTDIR/busybox-signed | grep "signature"
[ -z "$(ls $TESTDIR/busybox-unsigned | grep "signature")" ]
}
@test "signing: standalone" {
run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null
if [[ "$output" =~ 'signing is not supported' ]]; then
skip "skopeo built without support for creating signatures"
return 1
fi
if [ "$status" -ne 0 ]; then
die "exit code is $status; expected 0"
fi
run_skopeo copy --dest-tls-verify=false \
docker://quay.io/libpod/busybox:latest \
docker://localhost:5000/busybox:latest
run_skopeo copy --src-tls-verify=false \
docker://localhost:5000/busybox:latest \
dir:$TESTDIR/busybox
# Standalone sign
run_skopeo standalone-sign -o $TESTDIR/busybox.signature \
$TESTDIR/busybox/manifest.json \
localhost:5000/busybox:latest \
alice@test.redhat.com
# Standalone verify
fingerprint=$(gpg --list-keys | grep -B1 alice.test.redhat.com | head -n 1)
run_skopeo standalone-verify $TESTDIR/busybox/manifest.json \
localhost:5000/busybox:latest \
$fingerprint \
$TESTDIR/busybox.signature
# manifest digest
digest=$(echo "$output" | awk '{print $4;}')
run_skopeo manifest-digest $TESTDIR/busybox/manifest.json
expect_output $digest
}
teardown() {
podman rm -f reg

View File

@@ -1 +0,0 @@
* @microsoft/containerplat

View File

@@ -1,4 +1,4 @@
# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)
# go-winio
This repository contains utilities for efficiently performing Win32 IO operations in
Go. Currently, this is focused on accessing named pipes and other file handles, and

View File

@@ -16,7 +16,6 @@ import (
"time"
"github.com/Microsoft/go-winio"
"golang.org/x/sys/windows"
)
const (
@@ -298,11 +297,11 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
size = hdr.Size
}
fileInfo = &winio.FileBasicInfo{
LastAccessTime: windows.NsecToFiletime(hdr.AccessTime.UnixNano()),
LastWriteTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
ChangeTime: windows.NsecToFiletime(hdr.ChangeTime.UnixNano()),
LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
// Default to ModTime, we'll pull hdrCreationTime below if present
CreationTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
CreationTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
}
if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok {
attr, err := strconv.ParseUint(attrStr, 10, 32)
@@ -320,7 +319,7 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
if err != nil {
return "", 0, nil, err
}
fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano())
fileInfo.CreationTime = syscall.NsecToFiletime(creationTime.UnixNano())
}
return
}

View File

@@ -5,14 +5,21 @@ package winio
import (
"os"
"runtime"
"syscall"
"unsafe"
)
"golang.org/x/sys/windows"
//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
const (
fileBasicInfo = 0
fileIDInfo = 0x12
)
// FileBasicInfo contains file access time and file attributes information.
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
FileAttributes uint32
pad uint32 // padding
}
@@ -20,7 +27,7 @@ type FileBasicInfo struct {
// GetFileBasicInfo retrieves times and attributes for a file.
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
bi := &FileBasicInfo{}
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@@ -29,32 +36,13 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
// SetFileBasicInfo sets times and attributes for a file.
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
return nil
}
// FileStandardInfo contains extended information for the file.
// FILE_STANDARD_INFO in WinBase.h
// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
type FileStandardInfo struct {
AllocationSize, EndOfFile int64
NumberOfLinks uint32
DeletePending, Directory bool
}
// GetFileStandardInfo retrieves ended information for the file.
func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
si := &FileStandardInfo{}
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
return si, nil
}
// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
// unique on a system.
type FileIDInfo struct {
@@ -65,7 +53,7 @@ type FileIDInfo struct {
// GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) {
fileID := &FileIDInfo{}
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)

View File

@@ -4,6 +4,6 @@ go 1.12
require (
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.7.0
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
github.com/sirupsen/logrus v1.4.1
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
)

View File

@@ -1,14 +1,16 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View File

@@ -1,5 +1,3 @@
// +build windows
package winio
import (

View File

@@ -1,5 +1,3 @@
// +build windows
// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,

View File

@@ -1,5 +1,3 @@
// +build windows
package security
import (

View File

@@ -1,4 +1,4 @@
// Code generated by 'go generate'; DO NOT EDIT.
// Code generated mksyscall_windows.exe DO NOT EDIT
package security
@@ -19,7 +19,6 @@ const (
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
@@ -27,7 +26,7 @@ var (
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
@@ -41,22 +40,18 @@ var (
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo")
procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW")
procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo")
procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW")
)
func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) {
r1, _, e1 := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0)
if r1 != 0 {
err = errnoErr(e1)
}
return
}
func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) {
r1, _, e1 := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0)
if r1 != 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
@@ -64,7 +59,23 @@ func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *
func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) {
r1, _, e1 := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0)
if r1 != 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) {
r1, _, e1 := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0)
if r1 != 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}

View File

@@ -28,9 +28,8 @@ const (
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
SeBackupPrivilege = "SeBackupPrivilege"
SeRestorePrivilege = "SeRestorePrivilege"
SeSecurityPrivilege = "SeSecurityPrivilege"
SeBackupPrivilege = "SeBackupPrivilege"
SeRestorePrivilege = "SeRestorePrivilege"
)
const (

View File

@@ -1,3 +1,3 @@
package winio
//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go

View File

@@ -11,7 +11,7 @@ import (
"golang.org/x/sys/windows"
)
//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go
//go:generate go run mksyscall_windows.go -output zvhd.go vhd.go
//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.CreateVirtualDisk
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk
@@ -176,7 +176,9 @@ func DetachVhd(path string) error {
// AttachVirtualDisk attaches a virtual hard disk for use.
func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) {
// Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5.
if parameters.Version != 2 {
return fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version)
}
if err := attachVirtualDisk(
handle,
nil,
@@ -190,8 +192,7 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua
return nil
}
// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2
// of the ATTACH_VIRTUAL_DISK_PARAMETERS.
// AttachVhd attaches a virtual hard disk at `path` for use.
func AttachVhd(path string) (err error) {
handle, err := OpenVirtualDisk(
path,

View File

@@ -1,4 +1,4 @@
// Code generated by 'go generate'; DO NOT EDIT.
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package vhd
@@ -19,7 +19,6 @@ const (
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
@@ -27,7 +26,7 @@ var (
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
@@ -40,21 +39,13 @@ func errnoErr(e syscall.Errno) error {
var (
modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll")
procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk")
procCreateVirtualDisk = modvirtdisk.NewProc("CreateVirtualDisk")
procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk")
procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk")
procDetachVirtualDisk = modvirtdisk.NewProc("DetachVirtualDisk")
procGetVirtualDiskPhysicalPath = modvirtdisk.NewProc("GetVirtualDiskPhysicalPath")
procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk")
)
func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
if r1 != 0 {
err = errnoErr(e1)
}
return
}
func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(path)
@@ -67,23 +58,11 @@ func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virt
func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) {
r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle)))
if r1 != 0 {
err = errnoErr(e1)
}
return
}
func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) {
r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
if r1 != 0 {
err = errnoErr(e1)
}
return
}
func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) {
r1, _, e1 := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
if r1 != 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
@@ -100,7 +79,47 @@ func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtua
func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) {
r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
if r1 != 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
if r1 != 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) {
r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
if r1 != 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) {
r1, _, e1 := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
if r1 != 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}

View File

@@ -19,7 +19,6 @@ const (
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
@@ -27,7 +26,7 @@ var (
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
@@ -38,62 +37,243 @@ func errnoErr(e syscall.Errno) error {
}
var (
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modntdll = windows.NewLazySystemDLL("ntdll.dll")
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
modntdll = windows.NewLazySystemDLL("ntdll.dll")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
procCreateFileW = modkernel32.NewProc("CreateFileW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
procLocalFree = modkernel32.NewProc("LocalFree")
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
procBackupRead = modkernel32.NewProc("BackupRead")
procBackupWrite = modkernel32.NewProc("BackupWrite")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
procCreateFileW = modkernel32.NewProc("CreateFileW")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procLocalFree = modkernel32.NewProc("LocalFree")
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
procbind = modws2_32.NewProc("bind")
)
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
var _p0 uint32
if releaseAll {
_p0 = 1
}
r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
success = r0 != 0
if true {
err = errnoErr(e1)
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
newport = syscall.Handle(r0)
if newport == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
var _p0 uint32
if wait {
_p0 = 1
} else {
_p0 = 0
}
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
}
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
ptr = uintptr(r0)
return
}
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
status = ntstatus(r0)
return
}
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
if r0 != 0 {
winerr = syscall.Errno(r0)
}
return
}
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
status = ntstatus(r0)
return
}
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
status = ntstatus(r0)
return
}
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(accountName)
if err != nil {
return
}
return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
}
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
@@ -101,7 +281,11 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint
func convertSidToStringSid(sid *byte, str **uint16) (err error) {
r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
@@ -118,73 +302,126 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func localFree(mem uintptr) {
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
return
}
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
len = uint32(r0)
return
}
func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
var _p0 uint32
if releaseAll {
_p0 = 1
} else {
_p0 = 0
}
r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
success = r0 != 0
if true {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func impersonateSelf(level uint32) (err error) {
r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(accountName)
if err != nil {
return
}
return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
}
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
func revertToSelf() (err error) {
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
var _p0 uint32
if openAsSelf {
_p0 = 1
} else {
_p0 = 0
}
return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
}
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
return _lookupPrivilegeName(_p0, luid, buffer, size)
}
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
func getCurrentThread() (h syscall.Handle) {
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
h = syscall.Handle(r0)
return
}
@@ -205,27 +442,53 @@ func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err err
func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
var _p0 uint32
if openAsSelf {
_p0 = 1
func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
return _lookupPrivilegeName(_p0, luid, buffer, size)
}
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func revertToSelf() (err error) {
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
}
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
@@ -238,14 +501,22 @@ func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, proce
var _p1 uint32
if abort {
_p1 = 1
} else {
_p1 = 0
}
var _p2 uint32
if processSecurity {
_p2 = 1
} else {
_p2 = 0
}
r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
@@ -258,162 +529,22 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
var _p1 uint32
if abort {
_p1 = 1
} else {
_p1 = 0
}
var _p2 uint32
if processSecurity {
_p2 = 1
} else {
_p2 = 0
}
r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
err = errnoErr(e1)
}
return
}
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
newport = syscall.Handle(r0)
if newport == 0 {
err = errnoErr(e1)
}
return
}
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
}
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
err = errnoErr(e1)
}
return
}
func getCurrentThread() (h syscall.Handle) {
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
h = syscall.Handle(r0)
return
}
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
ptr = uintptr(r0)
return
}
func localFree(mem uintptr) {
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
return
}
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
status = ntstatus(r0)
return
}
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
status = ntstatus(r0)
return
}
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
status = ntstatus(r0)
return
}
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
if r0 != 0 {
winerr = syscall.Errno(r0)
}
return
}
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
var _p0 uint32
if wait {
_p0 = 1
}
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
@@ -421,7 +552,11 @@ func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socketError {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
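The adjacent error-handling lines in the wsaGetOverlappedResult and bind hunks are the two generated forms of the same mapping: a bare `err = errnoErr(e1)` versus an explicit check that falls back to syscall.EINVAL when the call failed but the reported errno is 0 (a zero errno would otherwise format as a success message). A simplified sketch of that mapping, assuming a plain errno rather than go-winio's cached errnoErr helper:

```go
package main

import (
	"fmt"
	"syscall"
)

// mapErrno is an illustrative stand-in for the wrappers' error mapping:
// a non-zero errno is returned as-is, while a zero errno (failure reported
// but no last-error set) is normalized to EINVAL so callers never receive
// an error value that reads as "success".
func mapErrno(e syscall.Errno) error {
	if e != 0 {
		return e
	}
	return syscall.EINVAL
}

func main() {
	fmt.Println(mapErrno(0) == syscall.EINVAL) // true: zero errno normalized
	fmt.Println(mapErrno(syscall.EINVAL))      // prints the platform's EINVAL text
}
```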


@@ -1,3 +1 @@
*.exe
.idea
.vscode

vendor/github.com/Microsoft/hcsshim/.gometalinter.json (generated, vendored, new file, +17 lines)

@@ -0,0 +1,17 @@
{
"Vendor": true,
"Deadline": "2m",
"Sort": [
"linter",
"severity",
"path",
"line"
],
"Skip": [
"internal\\schema2"
],
"EnableGC": true,
"Enable": [
"gofmt"
]
}


@@ -1 +1,3 @@
* @microsoft/containerplat
* @microsoft/containerplat
/hcn/* @nagiesek


@@ -25,7 +25,6 @@ plugins = ["grpc", "fieldpath"]
"gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
"google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/empty.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/struct.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
"google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
@@ -36,14 +35,20 @@ plugins = ["grpc", "fieldpath"]
prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"]
plugins = ["ttrpc"]
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"]
plugins = ["ttrpc"]
# Lock down runhcs config
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/ncproxyttrpc"]
plugins = ["ttrpc"]
[[descriptors]]
prefix = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
target = "cmd/containerd-shim-runhcs-v1/options/next.pb.txt"
ignore_files = [
"google/protobuf/descriptor.proto",
"gogoproto/gogo.proto"
]
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/vmservice"]
plugins = ["ttrpc"]
[[descriptors]]
prefix = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats"
target = "cmd/containerd-shim-runhcs-v1/stats/next.pb.txt"
ignore_files = [
"google/protobuf/descriptor.proto",
"gogoproto/gogo.proto"
]


@@ -1,6 +1,6 @@
# hcsshim
[![Build status](https://github.com/microsoft/hcsshim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster)
[![Build status](https://ci.appveyor.com/api/projects/status/nbcw28mnkqml0loa/branch/master?svg=true)](https://ci.appveyor.com/project/WindowsVirtualization/hcsshim/branch/master)
This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS).

vendor/github.com/Microsoft/hcsshim/appveyor.yml (generated, vendored, new file, +45 lines)

@@ -0,0 +1,45 @@
version: 0.1.{build}
image: Visual Studio 2017
clone_folder: c:\gopath\src\github.com\Microsoft\hcsshim
environment:
GOPATH: c:\gopath
PATH: "%GOPATH%\\bin;C:\\gometalinter-2.0.12-windows-amd64;%PATH%"
stack: go 1.13.4
build_script:
- appveyor DownloadFile https://github.com/alecthomas/gometalinter/releases/download/v2.0.12/gometalinter-2.0.12-windows-amd64.zip
- 7z x gometalinter-2.0.12-windows-amd64.zip -y -oC:\ > NUL
- gometalinter.exe --config .gometalinter.json ./...
- go build ./cmd/containerd-shim-runhcs-v1
- go build ./cmd/runhcs
- go build ./cmd/tar2ext4
- go build ./cmd/wclayer
- go build ./cmd/device-util
- go build ./internal/tools/grantvmgroupaccess
- go build ./internal/tools/uvmboot
- go build ./internal/tools/zapdir
- go test -v ./... -tags admin
- cd test
- go test -v ./internal -tags admin
- go test -c ./containerd-shim-runhcs-v1/ -tags functional
- go test -c ./cri-containerd/ -tags functional
- go test -c ./functional/ -tags functional
- go test -c ./runhcs/ -tags functional
artifacts:
- path: 'containerd-shim-runhcs-v1.exe'
- path: 'runhcs.exe'
- path: 'tar2ext4.exe'
- path: 'device-util.exe'
- path: 'wclayer.exe'
- path: 'grantvmgroupaccess.exe'
- path: 'uvmboot.exe'
- path: 'zapdir.exe'
- path: './test/containerd-shim-runhcs-v1.test.exe'
- path: './test/cri-containerd.test.exe'
- path: './test/functional.test.exe'
- path: './test/runhcs.test.exe'
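The appveyor.yml build script above compiles several test binaries with `-tags functional`, so those tests are excluded from a plain `go test ./...` run. A minimal sketch of a tag-gated test file of that kind (package and test names here are hypothetical), using the `// +build` constraint syntax current for the Go version pinned in the script:

```go
// +build functional

// Compiled only when "-tags functional" is supplied, e.g. by
// "go test -c ./functional/ -tags functional" in the script above.
package functional

import "testing"

func TestFunctionalPlaceholder(t *testing.T) {
	// Runs only from the tag-gated test binary.
	t.Log("functional build tag enabled")
}
```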


@@ -3,9 +3,9 @@ package computestorage
import (
"context"
"encoding/json"
"fmt"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
@@ -18,7 +18,7 @@ import (
// `layerData` is the parent read-only layer data.
func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) {
title := "hcsshim.AttachLayerStorageFilter"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
ctx, span := trace.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -32,7 +32,7 @@ func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData L
err = hcsAttachLayerStorageFilter(layerPath, string(bytes))
if err != nil {
return errors.Wrap(err, "failed to attach layer storage filter")
return fmt.Errorf("failed to attach layer storage filter: %s", err)
}
return nil
}
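Each computestorage function in these hunks does the same span bookkeeping: start an OpenCensus span named after the API, attach the arguments as attributes, and record the outcome through deferred calls that read the named return value. A generic sketch of that pattern using only the public OpenCensus API (oc.SetSpanStatus is an hcsshim-internal helper, approximated here; the function and span names are illustrative):

```go
package main

import (
	"context"
	"errors"

	"go.opencensus.io/trace"
)

// doWork mirrors the tracing pattern above: the deferred closure captures
// the named return value err and records it on the span before it ends.
func doWork(ctx context.Context, layerPath string) (err error) {
	ctx, span := trace.StartSpan(ctx, "example.DoWork")
	defer span.End()
	defer func() {
		if err != nil {
			// Rough equivalent of hcsshim's internal oc.SetSpanStatus.
			span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
		}
	}()
	span.AddAttributes(trace.StringAttribute("layerPath", layerPath))

	return errors.New("not implemented in this sketch")
}

func main() {
	_ = doWork(context.Background(), `C:\layers\base`)
}
```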


@@ -2,9 +2,9 @@ package computestorage
import (
"context"
"fmt"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
@@ -13,14 +13,14 @@ import (
// `layerPath` is a path to a directory containing the layer to export.
func DestroyLayer(ctx context.Context, layerPath string) (err error) {
title := "hcsshim.DestroyLayer"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
ctx, span := trace.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("layerPath", layerPath))
err = hcsDestroyLayer(layerPath)
if err != nil {
return errors.Wrap(err, "failed to destroy layer")
return fmt.Errorf("failed to destroy layer: %s", err)
}
return nil
}


@@ -2,9 +2,9 @@ package computestorage
import (
"context"
"fmt"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
@@ -13,14 +13,14 @@ import (
// `layerPath` is a path to a directory containing the layer to export.
func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) {
title := "hcsshim.DetachLayerStorageFilter"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
ctx, span := trace.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("layerPath", layerPath))
err = hcsDetachLayerStorageFilter(layerPath)
if err != nil {
return errors.Wrap(err, "failed to detach layer storage filter")
return fmt.Errorf("failed to detach layer storage filter: %s", err)
}
return nil
}


@@ -3,9 +3,9 @@ package computestorage
import (
"context"
"encoding/json"
"fmt"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
@@ -20,7 +20,7 @@ import (
// `options` are the export options applied to the exported layer.
func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) {
title := "hcsshim.ExportLayer"
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
ctx, span := trace.StartSpan(ctx, title)
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(
@@ -40,7 +40,7 @@ func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerD
err = hcsExportLayer(layerPath, exportFolderPath, string(ldbytes), string(obytes))
if err != nil {
return errors.Wrap(err, "failed to export layer")
return fmt.Errorf("failed to export layer: %s", err)
}
return nil
}
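The other recurring change in the computestorage hunks is between `errors.Wrap(err, "...")` from github.com/pkg/errors and `fmt.Errorf("...: %s", err)`. The practical difference is whether the original error remains reachable: wrapping (errors.Wrap in recent pkg/errors releases, or fmt.Errorf with %w) keeps the cause available to errors.Is/errors.As, while %s flattens it into plain text. A standard-library-only sketch, not taken from the vendored code:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	cause := os.ErrNotExist

	// %s formats the cause into the message; the error chain is lost.
	flat := fmt.Errorf("failed to export layer: %s", cause)
	// %w keeps the cause reachable, comparable to errors.Wrap.
	wrapped := fmt.Errorf("failed to export layer: %w", cause)

	fmt.Println(errors.Is(flat, os.ErrNotExist))    // false
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
}
```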

Some files were not shown because too many files have changed in this diff.