Compare commits

..

5 Commits

Author SHA1 Message Date
Valentin Rothberg
67abbb3cef v1.1.1
* Run htpasswd from our build-container instead of registry:2
 * vendor golang.org/x/text@v0.3.3

Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
2020-07-29 11:06:18 +02:00
Miloslav Trmač
747abd054f Merge pull request #984 from mtrmac/htpasswd-1.1
Run htpasswd from our build-container instead of registry:2
2020-07-16 20:20:36 +02:00
Miloslav Trmač
8a664856cd Run htpasswd from our build-container instead of registry:2
registry:2 no longer contains htpasswd.

Also don't use log_and_run ... >> $file
because that will cause the command to be logged to $file.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2020-07-16 19:09:51 +02:00
Daniel J Walsh
3f81ee46c3 Merge pull request #981 from vrothberg/1.1-update-x/text
[1.1] vendor golang.org/x/text@v0.3.3
2020-07-16 13:05:55 -04:00
Valentin Rothberg
14ba92b2f6 vendor golang.org/x/text@v0.3.3
Fixes: CVE-2020-14040
Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
2020-07-16 11:23:08 +02:00
1746 changed files with 48517 additions and 153020 deletions

View File

@@ -1,167 +0,0 @@
---
# Main collection of env. vars to set for all tasks and scripts.
env:
####
#### Global variables used for all tasks
####
# Name of the ultimate destination branch for this CI run, PR or post-merge.
DEST_BRANCH: "main"
# Overrides default location (/tmp/cirrus) for repo clone
GOPATH: &gopath "/var/tmp/go"
GOBIN: "${GOPATH}/bin"
GOCACHE: "${GOPATH}/cache"
GOSRC: &gosrc "/var/tmp/go/src/github.com/containers/skopeo"
# Required for consistency with containers/image CI
SKOPEO_PATH: *gosrc
CIRRUS_WORKING_DIR: *gosrc
# The default is 'sh' if unspecified
CIRRUS_SHELL: "/bin/bash"
# Save a little typing (path relative to $CIRRUS_WORKING_DIR)
SCRIPT_BASE: "./contrib/cirrus"
####
#### Cache-image names to test with (double-quotes around names are critical)
####
FEDORA_NAME: "fedora-34"
PRIOR_FEDORA_NAME: "fedora-33"
UBUNTU_NAME: "ubuntu-2104"
PRIOR_UBUNTU_NAME: "ubuntu-2010"
# Google-cloud VM Images
IMAGE_SUFFIX: "c6032583541653504"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${IMAGE_SUFFIX}"
# Container FQIN's
FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
PRIOR_FEDORA_CONTAINER_FQIN: "quay.io/libpod/prior-fedora_podman:${IMAGE_SUFFIX}"
UBUNTU_CONTAINER_FQIN: "quay.io/libpod/ubuntu_podman:${IMAGE_SUFFIX}"
PRIOR_UBUNTU_CONTAINER_FQIN: "quay.io/libpod/prior-ubuntu_podman:${IMAGE_SUFFIX}"
# Equivalent to image produced by 'make build-container'
SKOPEO_CI_CONTAINER_FQIN: "quay.io/skopeo/ci:${DEST_BRANCH}"
# Default timeout for each task
timeout_in: 45m
gcp_credentials: ENCRYPTED[52d9e807b531b37ab14e958cb5a72499460663f04c8d73e22ad608c027a31118420f1c80f0be0882fbdf96f49d8f9ac0]
validate_task:
# The git-validation tool doesn't work well on branch or tag push,
# under Cirrus-CI, due to challenges obtaining the starting commit ID.
# Only do validation for PRs.
only_if: $CIRRUS_PR != ''
container: &build_container
image: "${SKOPEO_CI_CONTAINER_FQIN}"
cpu: 4
memory: 8
script: make validate-local
cross_task:
macos_instance:
image: catalina-xcode
setup_script: |
export PATH=$GOPATH/bin:$PATH
brew install gpgme go go-md2man
go get -u golang.org/x/lint/golint
test_script: |
export PATH=$GOPATH/bin:$PATH
go version
go env
make validate-local test-unit-local bin/skopeo
sudo make install
/usr/local/bin/skopeo -v
#####
##### NOTE: This task is substantially duplicated in the containers/image
##### repository's `.cirrus.yml`. Changes made here should be fully merged
##### prior to being manually duplicated and maintained in containers/image.
#####
test_skopeo_task:
alias: test_skopeo
depends_on:
- validate
gce_instance:
image_project: libpod-218412
zone: "us-central1-f"
cpu: 2
memory: "4Gb"
# Required to be 200gig, do not modify - has i/o performance impact
# according to gcloud CLI tool warning messages.
disk: 200
image_name: ${FEDORA_CACHE_IMAGE_NAME}
matrix:
- name: "Skopeo Test"
env:
BUILDTAGS: 'btrfs_noversion libdm_no_deferred_remove'
- name: "Skopeo Test w/ opengpg"
env:
BUILDTAGS: 'btrfs_noversion libdm_no_deferred_remove containers_image_openpgp'
setup_script: >-
"${GOSRC}/${SCRIPT_BASE}/runner.sh" setup
vendor_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" vendor
build_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" build
validate_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" validate
unit_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" unit
integration_script: >-
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" integration
system_script: >
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" system
# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
meta_task:
name: "VM img. keepalive"
alias: meta
container: &smallcontainer
cpu: 2
memory: 2
image: quay.io/libpod/imgts:$IMAGE_SUFFIX
env:
# Space-separated list of images used by this repository state
IMGNAMES: >-
${FEDORA_CACHE_IMAGE_NAME}
${PRIOR_FEDORA_CACHE_IMAGE_NAME}
${UBUNTU_CACHE_IMAGE_NAME}
${PRIOR_UBUNTU_CACHE_IMAGE_NAME}
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_REPO_NAME}"
GCPJSON: ENCRYPTED[6867b5a83e960e7c159a98fe6c8360064567a071c6f4b5e7d532283ecd870aa65c94ccd74bdaa9bf7aadac9d42e20a67]
GCPNAME: ENCRYPTED[1cf558ae125e3c39ec401e443ad76452b25d790c45eb73d77c83eb059a0f7fd5085ef7e2f7e410b04ea6e83b0aab2eb1]
GCPPROJECT: libpod-218412
clone_script: &noop mkdir -p "$CIRRUS_WORKING_DIR"
script: /usr/local/bin/entrypoint.sh
# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
# of this task.
success_task:
name: "Total Success"
alias: success
# N/B: ALL tasks must be listed here, minus their '_task' suffix.
depends_on:
- validate
- cross
- test_skopeo
- meta
container: *smallcontainer
env:
CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
TEST_ENVIRON: container
clone_script: *noop
script: /bin/true

View File

@@ -1,10 +0,0 @@
version: 2
updates:
- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
time: "10:00"
timezone: Europe/Berlin
open-pull-requests-limit: 10

View File

@@ -1,25 +0,0 @@
name: Mark stale issues and pull requests
# Please refer to https://github.com/actions/stale/blob/master/action.yml
# to see all config knobs of the stale action.
on:
schedule:
- cron: "0 0 * * *"
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'A friendly reminder that this issue had no activity for 30 days.'
stale-pr-message: 'A friendly reminder that this PR had no activity for 30 days.'
stale-issue-label: 'stale-issue'
stale-pr-label: 'stale-pr'
days-before-stale: 30
days-before-close: 365
remove-stale-when-updated: true

.gitignore (vendored): 6 changed lines
View File

@@ -1,10 +1,6 @@
*.1
/layers-*
/skopeo
result
# ignore JetBrains IDEs (GoLand) config folder
.idea
# Ignore the bin directory
bin
.idea

View File

@@ -1,75 +1,28 @@
language: go
matrix:
include:
- os: linux
sudo: required
services:
- docker
- os: osx
go:
- 1.15.x
- 1.13.x
notifications:
email: false
env:
global:
# The multiarch manifest will support the architectures in this list. It should list the same architectures as the image-build-push step in this Travis config
- MULTIARCH_MANIFEST_ARCHITECTURES="amd64 s390x ppc64le"
# env variables for stable image build
- STABLE_IMAGE=quay.io/skopeo/stable:v1.2.0
- EXTRA_STABLE_IMAGE=quay.io/containers/skopeo:v1.2.0
# env variable for upstream image build
- UPSTREAM_IMAGE=quay.io/skopeo/upstream:master
install:
# NOTE: The (brew update) should not be necessary, and slows things down;
# we include it as a workaround for https://github.com/Homebrew/brew/issues/3299
# ideally Travis should bake the (brew update) into its images
# (https://github.com/travis-ci/travis-ci/issues/8552 ), but that's only going
# to happen around November 2017 per https://blog.travis-ci.com/2017-10-16-a-new-default-os-x-image-is-coming .
# Remove the (brew update) at that time.
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update && brew install gpgme ; fi
# Just declaration of the image-build-push step with script actions to execute
x_base_steps:
- &image-build-push
services:
- docker
os: linux
dist: focal
script:
# skopeo upstream image build
- make -f release/Makefile build-image/upstream
# Push the image if the build was started via a cron job or code was pushed to master
- if [ "$TRAVIS_EVENT_TYPE" == "push" ] || [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then
make -f release/Makefile push-image/upstream ;
fi
# skopeo stable image build
- make -f release/Makefile build-image/stable
# Push the image if the build was started via a cron job or code was pushed to master
- if [ "$TRAVIS_EVENT_TYPE" == "push" ] || [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then
make -f release/Makefile push-image/stable ;
fi
# Just declaration of stage order to run
stages:
# Build and push image for 1 architecture
- name: image-build-push
if: branch = master
# Create and push image manifest to have multiarch image on top of architecture specific images
- name: manifest-multiarch-push
if: (type IN (push, cron)) AND branch = master
# Actual execution of steps
jobs:
include:
# Run 3 image-build-push tasks in parallel for linux/amd64, linux/s390x and linux/ppc64le platforms (for upstream and stable)
- stage: image-build-push
<<: *image-build-push
name: images for amd64
arch: amd64
- stage: image-build-push
<<: *image-build-push
name: images for s390x
arch: s390x
- stage: image-build-push
<<: *image-build-push
name: images for ppc64le
arch: ppc64le
# Run final task to generate multi-arch image manifests (for upstream and stable)
- stage: manifest-multiarch-push
os: linux
dist: focal
script:
- make -f release/Makefile push-manifest-multiarch/upstream
- make -f release/Makefile push-manifest-multiarch/stable
script:
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then hack/travis_osx.sh ; fi
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make vendor && ./hack/tree_status.sh && make check ; fi

View File

@@ -134,7 +134,7 @@ When new PRs for [containers/image](https://github.com/containers/image) break `
- create out a new branch in your `skopeo` checkout and switch to it
- update `vendor.conf`. Find out the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
- run `make vendor`
- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for changed `containers/image` API)
- make any other necessary changes in the skopeo repo (e.g. add other dependencies now requied by `containers/image`, or update skopeo for changed `containers/image` API)
- optionally add new integration tests to the skopeo repo
- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
- iterate until tests pass and the PR is reviewed
@@ -142,7 +142,7 @@ When new PRs for [containers/image](https://github.com/containers/image) break `
- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
- run `make vendor`
- update the skopeo PR with the result, drop the “DO NOT MERGE” marking
- after tests complete successfully again, merge the skopeo PR
- after tests complete succcesfully again, merge the skopeo PR
## Communications

View File

@@ -13,7 +13,6 @@ RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-md2ma
which tar wget hostname util-linux bsdtar socat ethtool device-mapper iptables tree findutils nmap-ncat e2fsprogs xfsprogs lsof docker iproute \
bats jq podman runc \
golint \
openssl \
&& dnf clean all
# Install two versions of the registry. The first is an older version that
@@ -21,7 +20,6 @@ RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-md2ma
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
RUN set -x \
&& export GO111MODULE=off \
&& REGISTRY_COMMIT_SCHEMA1=ec87e9b6971d831f0eff752ddb54fb64693e51cd \
&& REGISTRY_COMMIT=47a064d4195a9b56133891bbb13620c3ac83a827 \
&& export GOPATH="$(mktemp -d)" \
@@ -35,7 +33,6 @@ RUN set -x \
&& rm -rf "$GOPATH"
RUN set -x \
&& export GO111MODULE=off \
&& export GOPATH=$(mktemp -d) \
&& git clone --depth 1 -b v1.5.0-alpha.3 git://github.com/openshift/origin "$GOPATH/src/github.com/openshift/origin" \
# The sed edits out a "go < 1.5" check which works incorrectly with go ≥ 1.10. \
@@ -48,7 +45,6 @@ RUN set -x \
ENV GOPATH /usr/share/gocode:/go
ENV PATH $GOPATH/bin:/usr/share/gocode/bin:$PATH
ENV container_magic 85531765-346b-4316-bdb8-358e4cca9e5d
RUN go version
WORKDIR /go/src/github.com/containers/skopeo
COPY . /go/src/github.com/containers/skopeo

View File

@@ -1,12 +1,13 @@
FROM registry.fedoraproject.org/fedora:33
FROM ubuntu:19.10
RUN dnf update -y && \
dnf install -y \
btrfs-progs-devel \
device-mapper-devel \
golang \
gpgme-devel \
make
RUN apt-get update && apt-get install -y \
golang \
libbtrfs-dev \
git-core \
libdevmapper-dev \
libgpgme11-dev \
go-md2man \
libglib2.0-dev
ENV GOPATH=/
WORKDIR /src/github.com/containers/skopeo

Makefile: 120 changed lines
View File

@@ -2,37 +2,33 @@
export GOPROXY=https://proxy.golang.org
# On some platforms (e.g. macOS, FreeBSD) gpgme is installed in /usr/local/ but /usr/local/include/ is
# not in the default search path. Rather than hard-code this directory, use gpgme-config.
# Sadly that must be done at the top-level user instead of locally in the gpgme subpackage, because cgo
# supports only pkg-config, not general shell scripts, and gpgme does not install a pkg-config file.
ifeq ($(shell uname),Darwin)
PREFIX ?= ${DESTDIR}/usr/local
DARWIN_BUILD_TAG=
# On macOS, (brew install gpgme) installs it within /usr/local, but /usr/local/include is not in the default search path.
# Rather than hard-code this directory, use gpgme-config. Sadly that must be done at the top-level user
# instead of locally in the gpgme subpackage, because cgo supports only pkg-config, not general shell scripts,
# and gpgme does not install a pkg-config file.
# If gpgme is not installed or gpgme-config can't be found for other reasons, the error is silently ignored
# (and the user will probably find out because the cgo compilation will fail).
GPGME_ENV := CGO_CFLAGS="$(shell gpgme-config --cflags 2>/dev/null)" CGO_LDFLAGS="$(shell gpgme-config --libs 2>/dev/null)"
else
PREFIX ?= ${DESTDIR}/usr
endif
# Normally empty, DESTDIR can be used to relocate the entire install-tree
DESTDIR ?=
CONTAINERSCONFDIR ?= ${DESTDIR}/etc/containers
REGISTRIESDDIR ?= ${CONTAINERSCONFDIR}/registries.d
SIGSTOREDIR ?= ${DESTDIR}/var/lib/containers/sigstore
PREFIX ?= ${DESTDIR}/usr/local
BINDIR ?= ${PREFIX}/bin
MANDIR ?= ${PREFIX}/share/man
BASHCOMPLETIONSDIR ?= ${PREFIX}/share/bash-completion/completions
INSTALLDIR=${PREFIX}/bin
MANINSTALLDIR=${PREFIX}/share/man
CONTAINERSSYSCONFIGDIR=${DESTDIR}/etc/containers
REGISTRIESDDIR=${CONTAINERSSYSCONFIGDIR}/registries.d
SIGSTOREDIR=${DESTDIR}/var/lib/containers/sigstore
BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
GO ?= go
GOBIN := $(shell $(GO) env GOBIN)
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
ifeq ($(GOBIN),)
GOBIN := $(GOPATH)/bin
endif
# Required for integration-tests to detect they are running inside a specific
# container image. Env. var defined in image, make does not automatically
# pass to children unless explicitly exported
export container_magic
CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
GOMD2MAN ?= $(shell command -v go-md2man || echo '$(GOBIN)/go-md2man')
@@ -47,10 +43,8 @@ ifeq ($(DEBUG), 1)
override GOGCFLAGS += -N -l
endif
ifeq ($(GOOS), linux)
ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64 riscv64))
GO_DYN_FLAGS="-buildmode=pie"
endif
ifeq ($(shell $(GO) env GOOS), linux)
GO_DYN_FLAGS="-buildmode=pie"
endif
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
@@ -68,15 +62,12 @@ CONTAINER_RUN := $(CONTAINER_CMD) "$(IMAGE)"
GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
EXTRA_LDFLAGS ?=
SKOPEO_LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'
MANPAGES_MD = $(wildcard docs/*.md)
MANPAGES ?= $(MANPAGES_MD:%.md=%)
BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh) $(shell hack/btrfs_installed_tag.sh)
LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG)
LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(DARWIN_BUILD_TAG)
BUILDTAGS += $(LOCAL_BUILD_TAGS)
ifeq ($(DISABLE_CGO), 1)
@@ -87,17 +78,14 @@ endif
# Note: Uses the -N -l go compiler options to disable compiler optimizations
# and inlining. Using these build options allows you to subsequently
# use source debugging tools like delve.
all: bin/skopeo docs
all: binary docs-in-container
help:
@echo "Usage: make <target>"
@echo
@echo "Defaults to building bin/skopeo and docs"
@echo
@echo " * 'install' - Install binaries and documents to system locations"
@echo " * 'binary' - Build skopeo with a container"
@echo " * 'static' - Build statically linked binary"
@echo " * 'bin/skopeo' - Build skopeo locally"
@echo " * 'binary-local' - Build skopeo locally"
@echo " * 'test-unit' - Execute unit tests"
@echo " * 'test-integration' - Execute integration tests"
@echo " * 'validate' - Verify whether there is no conflict and all Go source files have been formatted, linted and vetted"
@@ -110,37 +98,25 @@ help:
binary: cmd/skopeo
${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
skopeobuildimage make bin/skopeo $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
skopeobuildimage make binary-local $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
# Update nix/nixpkgs.json to its latest stable commit
.PHONY: nixpkgs
nixpkgs:
@nix run \
-f channel:nixos-20.09 nix-prefetch-git \
-c nix-prefetch-git \
--no-deepClone \
https://github.com/nixos/nixpkgs refs/heads/nixos-20.09 > nix/nixpkgs.json
# Build statically linked binary
.PHONY: static
static:
@nix build -f nix/
mkdir -p ./bin
cp -rfp ./result/bin/* ./bin/
binary-static: cmd/skopeo
${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
skopeobuildimage make binary-local-static $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
# Build w/o using containers
.PHONY: bin/skopeo
bin/skopeo:
$(GPGME_ENV) $(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} ${SKOPEO_LDFLAGS} -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o $@ ./cmd/skopeo
bin/skopeo.%:
GOOS=$(word 2,$(subst ., ,$@)) GOARCH=$(word 3,$(subst ., ,$@)) $(GO) build $(MOD_VENDOR) ${SKOPEO_LDFLAGS} -tags "containers_image_openpgp $(BUILDTAGS)" -o $@ ./cmd/skopeo
local-cross: bin/skopeo.darwin.amd64 bin/skopeo.linux.arm bin/skopeo.linux.arm64 bin/skopeo.windows.386.exe bin/skopeo.windows.amd64.exe
binary-local:
$(GPGME_ENV) $(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} -ldflags "-X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
binary-local-static:
$(GPGME_ENV) $(GO) build $(MOD_VENDOR) -ldflags "-extldflags \"-static\" -X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
build-container:
${CONTAINER_RUNTIME} build ${BUILD_ARGS} -t "$(IMAGE)" .
$(MANPAGES): %: %.md
sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $@
@sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $@
docs: $(MANPAGES)
@@ -150,26 +126,26 @@ docs-in-container:
skopeobuildimage make docs $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
clean:
rm -rf bin docs/*.1
rm -f skopeo docs/*.1
install: install-binary install-docs install-completions
install -d -m 755 ${SIGSTOREDIR}
install -d -m 755 ${CONTAINERSCONFDIR}
install -m 644 default-policy.json ${CONTAINERSCONFDIR}/policy.json
install -d -m 755 ${CONTAINERSSYSCONFIGDIR}
install -m 644 default-policy.json ${CONTAINERSSYSCONFIGDIR}/policy.json
install -d -m 755 ${REGISTRIESDDIR}
install -m 644 default.yaml ${REGISTRIESDDIR}/default.yaml
install-binary: bin/skopeo
install -d -m 755 ${BINDIR}
install -m 755 bin/skopeo ${BINDIR}/skopeo
install-binary: ./skopeo
install -d -m 755 ${INSTALLDIR}
install -m 755 skopeo ${INSTALLDIR}/skopeo
install-docs: docs
install -d -m 755 ${MANDIR}/man1
install -m 644 docs/*.1 ${MANDIR}/man1
install -d -m 755 ${MANINSTALLDIR}/man1
install -m 644 docs/*.1 ${MANINSTALLDIR}/man1/
install-completions:
install -m 755 -d ${BASHCOMPLETIONSDIR}
install -m 644 completions/bash/skopeo ${BASHCOMPLETIONSDIR}/skopeo
install -m 755 -d ${BASHINSTALLDIR}
install -m 644 completions/bash/skopeo ${BASHINSTALLDIR}/skopeo
shell: build-container
$(CONTAINER_RUN) bash
@@ -178,11 +154,7 @@ check: validate test-unit test-integration test-system
# The tests can run out of entropy and block in containers, so replace /dev/random.
test-integration: build-container
$(CONTAINER_RUN) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" $(MAKE) test-integration-local'
# Intended for CI, shortcut 'build-container' since already running inside container.
test-integration-local:
hack/make.sh test-integration
$(CONTAINER_RUN) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'
# complicated set of options needed to run podman-in-podman
test-system: build-container
@@ -190,15 +162,11 @@ test-system: build-container
$(CONTAINER_CMD) --privileged \
-v $$DTEMP:/var/lib/containers:Z -v /run/systemd/journal/socket:/run/systemd/journal/socket \
"$(IMAGE)" \
bash -c 'BUILDTAGS="$(BUILDTAGS)" $(MAKE) test-system-local'; \
bash -c 'BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-system'; \
rc=$$?; \
$(RM) -rf $$DTEMP; \
exit $$rc
# Intended for CI, shortcut 'build-container' since already running inside container.
test-system-local:
hack/make.sh test-system
test-unit: build-container
# Just call (make test-unit-local) here instead of worrying about environment differences
$(CONTAINER_RUN) make test-unit-local BUILDTAGS='$(BUILDTAGS)'

View File

@@ -19,7 +19,6 @@ Skopeo works with API V2 container image registries such as [docker.io](https://
For example you can copy images from one registry to another, without requiring privilege.
* Inspecting a remote image showing its properties including its layers, without requiring you to pull the image to the host.
* Deleting an image from an image repository.
* Syncing an external image repository to an internal registry for air-gapped deployments.
* When required by the repository, skopeo can pass the appropriate credentials and certificates for authentication.
Skopeo operates on the following image and repository types:
@@ -122,7 +121,7 @@ $ skopeo inspect --config docker://registry.fedoraproject.org/fedora:latest | j
]
}
```
#### Show unverified image's digest
#### Show unverifed image's digest
```console
$ skopeo inspect docker://registry.fedoraproject.org/fedora:latest | jq '.Digest'
"sha256:655721ff613ee766a4126cb5e0d5ae81598e1b0c3bcf7017c36c4d72cb092fe9"
@@ -137,7 +136,7 @@ $ skopeo inspect docker://registry.fedoraproject.org/fedora:latest | jq '.Digest
* Container Storage backends
- [github.com/containers/storage](https://github.com/containers/storage) (Backend for [Podman](https://podman.io), [CRI-O](https://cri-o.io), [Buildah](https://buildah.io) and friends)
- github.com/containers/storage (Backend for [Podman](https://podman.io), [CRI-O](https://cri-o.io), [Buildah](https://buildah.io) and friends)
- Docker daemon storage
@@ -155,22 +154,17 @@ $ skopeo copy oci:busybox_ocilayout:latest dir:existingemptydirectory
$ skopeo delete docker://localhost:5000/imagename:latest
```
## Syncing registries
```console
$ skopeo sync --src docker --dest dir registry.example.com/busybox /media/usb
```
## Authenticating to a registry
#### Private registries with authentication
skopeo uses credentials from the --creds (for skopeo inspect|delete) or --src-creds|--dest-creds (for skopeo copy) flags, if set; otherwise it uses configuration set by skopeo login, podman login, buildah login, or docker login.
```console
$ skopeo login --username USER myregistrydomain.com:5000
$ skopeo login --user USER docker://myregistrydomain.com:5000
Password:
$ skopeo inspect docker://myregistrydomain.com:5000/busybox
{"Tag":"latest","Digest":"sha256:473bb2189d7b913ed7187a33d11e743fdc2f88931122a44d91a301b64419f092","RepoTags":["latest"],"Comment":"","Created":"2016-01-15T18:06:41.282540103Z","ContainerConfig":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"DockerVersion":"1.8.3","Author":"","Config":{"Hostname":"aded96b43f48","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"9e77fef7a1c9f989988c06620dabc4020c607885b959a2cbd7c2283c91da3e33","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"Architecture":"amd64","Os":"linux"}
$ skopeo logout myregistrydomain.com:5000
$ skopeo logout docker://myregistrydomain.com:5000
```
#### Using --creds directly
@@ -195,20 +189,6 @@ Contributing
Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate in the project.
## Commands
| Command | Description |
| -------------------------------------------------- | ---------------------------------------------------------------------------------------------|
| [skopeo-copy(1)](/docs/skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](/docs/skopeo-delete.1.md) | Mark the image-name for later deletion by the registry's garbage collector. |
| [skopeo-inspect(1)](/docs/skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
| [skopeo-list-tags(1)](/docs/skopeo-list-tags.1.md) | Return a list of tags for the transport-specific image repository. |
| [skopeo-login(1)](/docs/skopeo-login.1.md) | Login to a container registry. |
| [skopeo-logout(1)](/docs/skopeo-logout.1.md) | Logout of a container registry. |
| [skopeo-manifest-digest(1)](/docs/skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](/docs/skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](/docs/skopeo-standalone-verify.1.md)| Verify an image signature. |
| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between container registries and local directories. |
License
-
skopeo is licensed under the Apache License, Version 2.0. See

View File

@@ -4,29 +4,27 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/spf13/cobra"
encconfig "github.com/containers/ocicrypt/config"
enchelpers "github.com/containers/ocicrypt/helpers"
"github.com/spf13/cobra"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
type copyOptions struct {
global *globalOptions
srcImage *imageOptions
destImage *imageDestOptions
retryOpts *retry.RetryOptions
additionalTags []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
digestFile string // Write digest to this file
format optionalString // Force conversion of the image to a specified format
quiet bool // Suppress output information when copying images
all bool // Copy all of the images if the source is a list
@@ -39,11 +37,9 @@ func copyCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
srcFlags, srcOpts := imageFlags(global, sharedOpts, "src-", "screds")
destFlags, destOpts := imageDestFlags(global, sharedOpts, "dest-", "dcreds")
retryFlags, retryOpts := retryFlags()
opts := copyOptions{global: global,
srcImage: srcOpts,
destImage: destOpts,
retryOpts: retryOpts,
}
cmd := &cobra.Command{
Use: "copy [command options] SOURCE-IMAGE DESTINATION-IMAGE",
@@ -56,20 +52,18 @@ Supported transports:
See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
RunE: commandAction(opts.run),
Example: `skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest`,
Example: `skopeo copy --sign-by dev@example.com container-storage:example/busybox:streaming docker://example/busybox:gold`,
}
adjustUsage(cmd)
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&srcFlags)
flags.AddFlagSet(&destFlags)
flags.AddFlagSet(&retryFlags)
flags.StringSliceVar(&opts.additionalTags, "additional-tag", []string{}, "additional tags (supports docker-archive)")
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress output information when copying images")
flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE-IMAGE")
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
flags.StringVar(&opts.digestFile, "digestfile", "", "Write the digest of the pushed image to the specified file")
flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)`)
flags.StringSliceVar(&opts.encryptionKeys, "encryption-key", []string{}, "*Experimental* key with the encryption protocol to use needed to encrypt the image (e.g. jwe:/path/to/key.pem)")
flags.IntSliceVar(&opts.encryptLayer, "encrypt-layer", []int{}, "*Experimental* the 0-indexed layer indices, with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer)")
@@ -113,9 +107,15 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
var manifestType string
if opts.format.present {
manifestType, err = parseManifestFormat(opts.format.value)
if err != nil {
return err
switch opts.format.value {
case "oci":
manifestType = imgspecv1.MediaTypeImageManifest
case "v2s1":
manifestType = manifest.DockerV2Schema1SignedMediaType
case "v2s2":
manifestType = manifest.DockerV2Schema2MediaType
default:
return fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", opts.format.value)
}
}
@@ -178,31 +178,17 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
decConfig = cc.DecryptConfig
}
return retry.RetryIfNecessary(ctx, func() error {
manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
RemoveSignatures: opts.removeSignatures,
SignBy: opts.signByFingerprint,
ReportWriter: stdout,
SourceCtx: sourceCtx,
DestinationCtx: destinationCtx,
ForceManifestMIMEType: manifestType,
ImageListSelection: imageListSelection,
OciDecryptConfig: decConfig,
OciEncryptLayers: encLayers,
OciEncryptConfig: encConfig,
})
if err != nil {
return err
}
if opts.digestFile != "" {
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
return err
}
if err = ioutil.WriteFile(opts.digestFile, []byte(manifestDigest.String()), 0644); err != nil {
return fmt.Errorf("Failed to write digest to file %q: %w", opts.digestFile, err)
}
}
return nil
}, opts.retryOpts)
_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
RemoveSignatures: opts.removeSignatures,
SignBy: opts.signByFingerprint,
ReportWriter: stdout,
SourceCtx: sourceCtx,
DestinationCtx: destinationCtx,
ForceManifestMIMEType: manifestType,
ImageListSelection: imageListSelection,
OciDecryptConfig: decConfig,
OciEncryptLayers: encLayers,
OciEncryptConfig: encConfig,
})
return err
}
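The copy.go hunks above differ in two places: one side maps the `--format` value through a `parseManifestFormat` helper while the other inlines the oci/v2s1/v2s2 switch, and only one side carries the `--digestfile` flag that writes the copied manifest's digest to a file. The sketch below restates both pieces using the containers/image APIs already named in the diff; it is illustrative only, and the helper names (`manifestTypeForFormat`, `writeDigestFile`) are not from the repository.

```go
// Minimal sketch of the format mapping and --digestfile handling shown above.
package copysketch

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/image/v5/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// manifestTypeForFormat mirrors the oci/v2s1/v2s2 switch in the hunk above.
func manifestTypeForFormat(format string) (string, error) {
	switch format {
	case "oci":
		return imgspecv1.MediaTypeImageManifest, nil
	case "v2s1":
		return manifest.DockerV2Schema1SignedMediaType, nil
	case "v2s2":
		return manifest.DockerV2Schema2MediaType, nil
	default:
		return "", fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", format)
	}
}

// writeDigestFile computes the digest of the manifest bytes returned by
// copy.Image and writes it to path, as the --digestfile branch above does.
func writeDigestFile(path string, manifestBytes []byte) error {
	manifestDigest, err := manifest.Digest(manifestBytes)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(path, []byte(manifestDigest.String()), 0644)
}
```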

View File

@@ -6,26 +6,22 @@ import (
"io"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/spf13/cobra"
)
type deleteOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
global *globalOptions
image *imageOptions
}
func deleteCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := deleteOptions{
global: global,
image: imageOpts,
retryOpts: retryOpts,
global: global,
image: imageOpts,
}
cmd := &cobra.Command{
Use: "delete [command options] IMAGE-NAME",
@@ -42,7 +38,6 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&imageFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
@@ -68,8 +63,5 @@ func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
return retry.RetryIfNecessary(ctx, func() error {
return ref.DeleteImage(ctx, sys)
}, opts.retryOpts)
return ref.DeleteImage(ctx, sys)
}
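The delete.go hunk above is the smallest instance of a pattern that recurs throughout this compare: one side wraps each registry operation in `retry.RetryIfNecessary` from github.com/containers/common/pkg/retry, driven by a `*retry.RetryOptions` built from `retryFlags()`. A self-contained sketch of that wrapper follows; `deleteWithRetry` is an illustrative name, and the assumption that `RetryOptions` exposes a `MaxRetry` field (fed by a retry-count flag) is not shown in the diff itself.

```go
// Hedged sketch of the retry wrapper visible above: RetryIfNecessary re-runs
// the closure on retryable errors, up to the configured number of attempts.
package deletesketch

import (
	"context"

	"github.com/containers/common/pkg/retry"
	"github.com/containers/image/v5/types"
)

func deleteWithRetry(ctx context.Context, ref types.ImageReference, sys *types.SystemContext) error {
	retryOpts := &retry.RetryOptions{MaxRetry: 3} // assumed field; e.g. set from a --retry-times flag
	return retry.RetryIfNecessary(ctx, func() error {
		return ref.DeleteImage(ctx, sys)
	}, retryOpts)
}
```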

View File

@@ -4,42 +4,47 @@ import (
"encoding/json"
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"text/template"
"time"
"github.com/containers/common/pkg/report"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/skopeo/cmd/skopeo/inspect"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// inspectOutput is the output format of (skopeo inspect), primarily so that we can format it with a simple json.MarshalIndent.
type inspectOutput struct {
Name string `json:",omitempty"`
Tag string `json:",omitempty"`
Digest digest.Digest
RepoTags []string
Created *time.Time
DockerVersion string
Labels map[string]string
Architecture string
Os string
Layers []string
Env []string
}
type inspectOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
format string
raw bool // Output the raw manifest instead of parsing information about the image
config bool // Output the raw config blob instead of parsing information about the image
global *globalOptions
image *imageOptions
raw bool // Output the raw manifest instead of parsing information about the image
config bool // Output the raw config blob instead of parsing information about the image
}
func inspectCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := inspectOptions{
global: global,
image: imageOpts,
retryOpts: retryOpts,
global: global,
image: imageOpts,
}
cmd := &cobra.Command{
Use: "inspect [command options] IMAGE-NAME",
@@ -50,38 +55,25 @@ Supported transports:
See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
RunE: commandAction(opts.run),
Example: `skopeo inspect docker://registry.fedoraproject.org/fedora
skopeo inspect --config docker://docker.io/alpine
skopeo inspect --format "Name: {{.Name}} Digest: {{.Digest}}" docker://registry.access.redhat.com/ubi8`,
RunE: commandAction(opts.run),
Example: `skopeo inspect docker://docker.io/fedora`,
}
adjustUsage(cmd)
flags := cmd.Flags()
flags.BoolVar(&opts.raw, "raw", false, "output raw manifest or configuration")
flags.BoolVar(&opts.config, "config", false, "output configuration")
flags.StringVarP(&opts.format, "format", "f", "", "Format the output to a Go template")
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&imageFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error) {
var (
rawManifest []byte
src types.ImageSource
imgInspect *types.ImageInspectInfo
data []interface{}
)
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
if len(args) != 1 {
return errors.New("Exactly one argument expected")
}
if opts.raw && opts.format != "" {
return errors.New("raw output does not support format option")
}
imageName := args[0]
if err := reexecIfNecessaryForImages(imageName); err != nil {
@@ -93,11 +85,9 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
return err
}
if err := retry.RetryIfNecessary(ctx, func() error {
src, err = parseImageSource(ctx, opts.image, imageName)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error parsing image name %q", imageName)
src, err := parseImageSource(ctx, opts.image, imageName)
if err != nil {
return fmt.Errorf("Error parsing image name %q: %v", imageName, err)
}
defer func() {
@@ -106,11 +96,9 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
}()
if err := retry.RetryIfNecessary(ctx, func() error {
rawManifest, _, err = src.GetManifest(ctx, nil)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error retrieving manifest for image")
rawManifest, _, err := src.GetManifest(ctx, nil)
if err != nil {
return fmt.Errorf("Error retrieving manifest for image: %v", err)
}
if opts.raw && !opts.config {
@@ -118,65 +106,45 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
if err != nil {
return fmt.Errorf("Error writing manifest to standard output: %v", err)
}
return nil
}
img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, nil))
if err != nil {
return errors.Wrapf(err, "Error parsing manifest for image")
return fmt.Errorf("Error parsing manifest for image: %v", err)
}
if opts.config && opts.raw {
var configBlob []byte
if err := retry.RetryIfNecessary(ctx, func() error {
configBlob, err = img.ConfigBlob(ctx)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error reading configuration blob")
configBlob, err := img.ConfigBlob(ctx)
if err != nil {
return fmt.Errorf("Error reading configuration blob: %v", err)
}
_, err = stdout.Write(configBlob)
if err != nil {
return errors.Wrapf(err, "Error writing configuration blob to standard output")
return fmt.Errorf("Error writing configuration blob to standard output: %v", err)
}
return nil
} else if opts.config {
var config *v1.Image
if err := retry.RetryIfNecessary(ctx, func() error {
config, err = img.OCIConfig(ctx)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error reading OCI-formatted configuration data")
}
if report.IsJSON(opts.format) || opts.format == "" {
var out []byte
out, err = json.MarshalIndent(config, "", " ")
if err == nil {
fmt.Fprintf(stdout, "%s\n", string(out))
}
} else {
row := "{{range . }}" + report.NormalizeFormat(opts.format) + "{{end}}"
data = append(data, config)
err = printTmpl(row, data)
}
config, err := img.OCIConfig(ctx)
if err != nil {
return errors.Wrapf(err, "Error writing OCI-formatted configuration data to standard output")
return fmt.Errorf("Error reading OCI-formatted configuration data: %v", err)
}
err = json.NewEncoder(stdout).Encode(config)
if err != nil {
return fmt.Errorf("Error writing OCI-formatted configuration data to standard output: %v", err)
}
return nil
}
if err := retry.RetryIfNecessary(ctx, func() error {
imgInspect, err = img.Inspect(ctx)
return err
}, opts.retryOpts); err != nil {
imgInspect, err := img.Inspect(ctx)
if err != nil {
return err
}
outputData := inspect.Output{
outputData := inspectOutput{
Name: "", // Set below if DockerReference() is known
Tag: imgInspect.Tag,
// Digest is set below.
RepoTags: []string{}, // Possibly overridden for docker.Transport.
RepoTags: []string{}, // Possibly overriden for docker.Transport.
Created: imgInspect.Created,
DockerVersion: imgInspect.DockerVersion,
Labels: imgInspect.Labels,
@@ -187,7 +155,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
outputData.Digest, err = manifest.Digest(rawManifest)
if err != nil {
return errors.Wrapf(err, "Error computing manifest digest")
return fmt.Errorf("Error computing manifest digest: %v", err)
}
if dockerRef := img.Reference().DockerReference(); dockerRef != nil {
outputData.Name = dockerRef.Name()
@@ -205,35 +173,15 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
// In addition, AWS ECR rejects it with 403 (Forbidden) if the "ecr:ListImages"
// action is not allowed.
if !strings.Contains(err.Error(), "401") && !strings.Contains(err.Error(), "403") {
return errors.Wrapf(err, "Error determining repository tags")
return fmt.Errorf("Error determining repository tags: %v", err)
}
logrus.Warnf("Registry disallows tag list retrieval; skipping")
}
}
if report.IsJSON(opts.format) || opts.format == "" {
out, err := json.MarshalIndent(outputData, "", " ")
if err == nil {
fmt.Fprintf(stdout, "%s\n", string(out))
}
return err
}
row := "{{range . }}" + report.NormalizeFormat(opts.format) + "{{end}}"
data = append(data, outputData)
return printTmpl(row, data)
}
func inspectNormalize(row string) string {
r := strings.NewReplacer(
".ImageID", ".Image",
)
return r.Replace(row)
}
func printTmpl(row string, data []interface{}) error {
t, err := template.New("skopeo inspect").Parse(row)
out, err := json.MarshalIndent(outputData, "", " ")
if err != nil {
return err
}
w := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', 0)
return t.Execute(w, data)
fmt.Fprintf(stdout, "%s\n", string(out))
return nil
}
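Besides the retry changes, the inspect.go hunks above show a `--format` flag on one side of the compare: output is indented JSON by default (or when `report.IsJSON` matches), otherwise a Go text/template rendered through a tabwriter after `report.NormalizeFormat`. The sketch below restates that output path with the github.com/containers/common/pkg/report helpers named in the diff; `printTo` is an illustrative stand-in for the `printTmpl` helper shown above, and it flushes the tabwriter, which the code in the hunk leaves implicit.

```go
package inspectsketch

import (
	"encoding/json"
	"fmt"
	"io"
	"text/tabwriter"
	"text/template"

	"github.com/containers/common/pkg/report"
)

// printTo renders data either as indented JSON (the default) or through a
// user-supplied Go template, mirroring the --format handling shown above.
func printTo(w io.Writer, format string, data interface{}) error {
	if format == "" || report.IsJSON(format) {
		out, err := json.MarshalIndent(data, "", "    ")
		if err != nil {
			return err
		}
		fmt.Fprintf(w, "%s\n", out)
		return nil
	}
	row := "{{range . }}" + report.NormalizeFormat(format) + "{{end}}"
	t, err := template.New("skopeo inspect").Parse(row)
	if err != nil {
		return err
	}
	tw := tabwriter.NewWriter(w, 8, 2, 2, ' ', 0)
	defer tw.Flush()
	return t.Execute(tw, []interface{}{data})
}
```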

View File

@@ -1,23 +0,0 @@
package inspect
import (
"time"
digest "github.com/opencontainers/go-digest"
)
// Output is the output format of (skopeo inspect),
// primarily so that we can format it with a simple json.MarshalIndent.
type Output struct {
Name string `json:",omitempty"`
Tag string `json:",omitempty"`
Digest digest.Digest
RepoTags []string
Created *time.Time
DockerVersion string
Labels map[string]string
Architecture string
Os string
Layers []string
Env []string
}
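The deleted cmd/skopeo/inspect/output.go above carries only the `Output` struct; the same fields appear inline as `inspectOutput` on the other side of the compare. The `json:",omitempty"` tags mean `Name` and `Tag` are dropped from the JSON when empty, while the untagged fields always appear. A small illustrative check of that behaviour, using nothing beyond encoding/json; the trimmed struct and the digest value are placeholders.

```go
package outputsketch

import (
	"encoding/json"
	"fmt"
	"time"

	digest "github.com/opencontainers/go-digest"
)

// output mirrors a subset of the struct above; only Name and Tag carry ",omitempty".
type output struct {
	Name    string `json:",omitempty"`
	Tag     string `json:",omitempty"`
	Digest  digest.Digest
	Created *time.Time
}

func demo() {
	b, _ := json.Marshal(output{Digest: "sha256:not-a-real-digest"})
	// Name and Tag are omitted: {"Digest":"sha256:not-a-real-digest","Created":null}
	fmt.Println(string(b))
}
```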

View File

@@ -7,7 +7,6 @@ import (
"os"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/pkg/blobinfocache"
@@ -18,19 +17,16 @@ import (
)
type layersOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
global *globalOptions
image *imageOptions
}
func layersCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := layersOptions{
global: global,
image: imageOpts,
retryOpts: retryOpts,
global: global,
image: imageOpts,
}
cmd := &cobra.Command{
Hidden: true,
@@ -42,7 +38,6 @@ func layersCmd(global *globalOptions) *cobra.Command {
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&imageFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
@@ -65,20 +60,12 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
return err
}
cache := blobinfocache.DefaultCache(sys)
var (
rawSource types.ImageSource
src types.ImageCloser
)
if err = retry.RetryIfNecessary(ctx, func() error {
rawSource, err = parseImageSource(ctx, opts.image, imageName)
return err
}, opts.retryOpts); err != nil {
rawSource, err := parseImageSource(ctx, opts.image, imageName)
if err != nil {
return err
}
if err = retry.RetryIfNecessary(ctx, func() error {
src, err = image.FromSource(ctx, sys, rawSource)
return err
}, opts.retryOpts); err != nil {
src, err := image.FromSource(ctx, sys, rawSource)
if err != nil {
if closeErr := rawSource.Close(); closeErr != nil {
return errors.Wrapf(err, " (close error: %v)", closeErr)
}
@@ -142,14 +129,8 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
}()
for _, bd := range blobDigests {
var (
r io.ReadCloser
blobSize int64
)
if err = retry.RetryIfNecessary(ctx, func() error {
r, blobSize, err = rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
return err
}, opts.retryOpts); err != nil {
r, blobSize, err := rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
if err != nil {
return err
}
if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
@@ -160,11 +141,8 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
}
}
var manifest []byte
if err = retry.RetryIfNecessary(ctx, func() error {
manifest, _, err = src.Manifest(ctx)
return err
}, opts.retryOpts); err != nil {
manifest, _, err := src.Manifest(ctx)
if err != nil {
return err
}
if err := dest.PutManifest(ctx, manifest, nil); err != nil {

View File

@@ -7,7 +7,6 @@ import (
"io"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports/alltransports"
@@ -23,20 +22,17 @@ type tagListOutput struct {
}
type tagsOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
global *globalOptions
image *imageOptions
}
func tagsCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := dockerImageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := tagsOptions{
global: global,
image: imageOpts,
retryOpts: retryOpts,
global: global,
image: imageOpts,
}
cmd := &cobra.Command{
Use: "list-tags [command options] REPOSITORY-NAME",
@@ -55,7 +51,6 @@ See skopeo-list-tags(1) section "REPOSITORY NAMES" for the expected format
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&imageFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
@@ -123,12 +118,8 @@ func (opts *tagsOptions) run(args []string, stdout io.Writer) (retErr error) {
return err
}
var repositoryName string
var tagListing []string
if err = retry.RetryIfNecessary(ctx, func() error {
repositoryName, tagListing, err = listDockerTags(ctx, sys, imgRef)
return err
}, opts.retryOpts); err != nil {
repositoryName, tagListing, err := listDockerTags(ctx, sys, imgRef)
if err != nil {
return err
}

View File

@@ -17,8 +17,6 @@ import (
// and will be populated by the Makefile
var gitCommit = ""
var defaultUserAgent = "skopeo/" + version.Version
type globalOptions struct {
debug bool // Enable debug output
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
@@ -145,7 +143,6 @@ func (opts *globalOptions) newSystemContext() *types.SystemContext {
VariantChoice: opts.overrideVariant,
SystemRegistriesConfPath: opts.registriesConfPath,
BigFilesTemporaryDir: opts.tmpDir,
DockerRegistryUserAgent: defaultUserAgent,
}
// DEPRECATED: We support this for backward compatibility, but override it if a per-image flag is provided.
if opts.tlsVerify.present {

View File

@@ -23,10 +23,7 @@ func TestGlobalOptionsNewSystemContext(t *testing.T) {
// Default state
opts, _ := fakeGlobalOptions(t, []string{})
res := opts.newSystemContext()
assert.Equal(t, &types.SystemContext{
// User-Agent is set by default.
DockerRegistryUserAgent: defaultUserAgent,
}, res)
assert.Equal(t, &types.SystemContext{}, res)
// Set everything to non-default values.
opts, _ = fakeGlobalOptions(t, []string{
"--registries.d", "/srv/registries.d",
@@ -46,6 +43,5 @@ func TestGlobalOptionsNewSystemContext(t *testing.T) {
BigFilesTemporaryDir: "/srv",
SystemRegistriesConfPath: "/srv/registries.conf",
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
DockerRegistryUserAgent: defaultUserAgent,
}, res)
}
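The main.go and main_test.go hunks above show that one side of this compare sets a default User-Agent (`defaultUserAgent = "skopeo/" + version.Version`) on every `types.SystemContext` it builds, and that the unit test asserts this default. A minimal restatement of what that amounts to; the function name and version string are placeholders.

```go
package uasketch

import "github.com/containers/image/v5/types"

// newSystemContext returns a context whose registry requests identify the
// client, via the DockerRegistryUserAgent field seen in the hunks above.
func newSystemContext(version string) *types.SystemContext {
	return &types.SystemContext{
		DockerRegistryUserAgent: "skopeo/" + version, // e.g. "skopeo/1.1.1" (placeholder)
	}
}
```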

View File

@@ -17,8 +17,8 @@ func TestManifestDigest(t *testing.T) {
}
// Error reading manifest
out, err := runSkopeo("manifest-digest", "/this/does/not/exist")
assertTestFailed(t, out, err, "/this/does/not/exist")
out, err := runSkopeo("manifest-digest", "/this/doesnt/exist")
assertTestFailed(t, out, err, "/this/doesnt/exist")
// Error computing manifest
out, err = runSkopeo("manifest-digest", "fixtures/v2s1-invalid-signatures.manifest.json")

View File

@@ -58,8 +58,8 @@ func TestStandaloneSign(t *testing.T) {
// Error reading manifest
out, err := runSkopeo("standalone-sign", "-o", "/dev/null",
"/this/does/not/exist", dockerReference, fixturesTestKeyFingerprint)
assertTestFailed(t, out, err, "/this/does/not/exist")
"/this/doesnt/exist", dockerReference, fixturesTestKeyFingerprint)
assertTestFailed(t, out, err, "/this/doesnt/exist")
// Invalid Docker reference
out, err = runSkopeo("standalone-sign", "-o", "/dev/null",
@@ -117,14 +117,14 @@ func TestStandaloneVerify(t *testing.T) {
}
// Error reading manifest
out, err := runSkopeo("standalone-verify", "/this/does/not/exist",
out, err := runSkopeo("standalone-verify", "/this/doesnt/exist",
dockerReference, fixturesTestKeyFingerprint, signaturePath)
assertTestFailed(t, out, err, "/this/does/not/exist")
assertTestFailed(t, out, err, "/this/doesnt/exist")
// Error reading signature
out, err = runSkopeo("standalone-verify", manifestPath,
dockerReference, fixturesTestKeyFingerprint, "/this/does/not/exist")
assertTestFailed(t, out, err, "/this/does/not/exist")
dockerReference, fixturesTestKeyFingerprint, "/this/doesnt/exist")
assertTestFailed(t, out, err, "/this/doesnt/exist")
// Error verifying signature
out, err = runSkopeo("standalone-verify", manifestPath,
@@ -151,8 +151,8 @@ func TestUntrustedSignatureDump(t *testing.T) {
// Error reading manifest
out, err := runSkopeo("untrusted-signature-dump-without-verification",
"/this/does/not/exist")
assertTestFailed(t, out, err, "/this/does/not/exist")
"/this/doesnt/exist")
assertTestFailed(t, out, err, "/this/doesnt/exist")
// Error reading signature (input is not a signature)
out, err = runSkopeo("untrusted-signature-dump-without-verification", "fixtures/image.manifest.json")

View File

@@ -11,14 +11,12 @@ import (
"regexp"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/copy"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -27,27 +25,24 @@ import (
// syncOptions contains information retrieved from the skopeo sync command line.
type syncOptions struct {
global *globalOptions // Global (not command dependent) skopeo options
global *globalOptions // Global (not command dependant) skopeo options
srcImage *imageOptions // Source image options
destImage *imageDestOptions // Destination image options
retryOpts *retry.RetryOptions
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
format optionalString // Force conversion of the image to a specified format
source string // Source repository name
destination string // Destination registry name
scoped bool // When true, namespace copied images at destination using the source repository name
all bool // Copy all of the images if an image in the source is a list
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
source string // Source repository name
destination string // Destination registry name
scoped bool // When true, namespace copied images at destination using the source repository name
}
// repoDescriptor contains information of a single repository used as a sync source.
type repoDescriptor struct {
DirBasePath string // base path when source is 'dir'
ImageRefs []types.ImageReference // List of tagged image found for the repository
Context *types.SystemContext // SystemContext for the sync command
DirBasePath string // base path when source is 'dir'
TaggedImages []types.ImageReference // List of tagged image found for the repository
Context *types.SystemContext // SystemContext for the sync command
}
// tlsVerifyConfig is an implementation of the Unmarshaler interface, used to
// tlsVerify is an implementation of the Unmarshaler interface, used to
// customize the unmarshaling behaviour of the tls-verify YAML key.
type tlsVerifyConfig struct {
skip types.OptionalBool // skip TLS verification check (false by default)
@@ -56,7 +51,7 @@ type tlsVerifyConfig struct {
// registrySyncConfig contains information about a single registry, read from
// the source YAML file
type registrySyncConfig struct {
Images map[string][]string // Images map images name to slices with the images' references (tags, digests)
Images map[string][]string // Images map images name to slices with the images' tags
ImagesByTagRegex map[string]string `yaml:"images-by-tag-regex"` // Images map images name to regular expression with the images' tags
Credentials types.DockerAuthConfig // Username and password used to authenticate with the registry
TLSVerify tlsVerifyConfig `yaml:"tls-verify"` // TLS verification mode (enabled by default)
@@ -70,13 +65,11 @@ func syncCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
srcFlags, srcOpts := dockerImageFlags(global, sharedOpts, "src-", "screds")
destFlags, destOpts := dockerImageFlags(global, sharedOpts, "dest-", "dcreds")
retryFlags, retryOpts := retryFlags()
opts := syncOptions{
global: global,
srcImage: srcOpts,
destImage: &imageDestOptions{imageOptions: destOpts},
retryOpts: retryOpts,
}
cmd := &cobra.Command{
@@ -96,19 +89,16 @@ See skopeo-sync(1) for details.
flags := cmd.Flags()
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE images")
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source)`)
flags.StringVarP(&opts.source, "src", "s", "", "SOURCE transport type")
flags.StringVarP(&opts.destination, "dest", "d", "", "DESTINATION transport type")
flags.BoolVar(&opts.scoped, "scoped", false, "Images at DESTINATION are prefix using the full source image path as scope")
flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&srcFlags)
flags.AddFlagSet(&destFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
// UnmarshalYAML is the implementation of the Unmarshaler interface method
// unmarshalYAML is the implementation of the Unmarshaler interface method
// method for the tlsVerifyConfig type.
// It unmarshals the 'tls-verify' YAML key so that, when the key is not
// specified, tls verification is enforced.
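The comment above describes custom YAML unmarshaling for the `tls-verify` key: when the key is absent, verification stays enforced. A hedged sketch of how such a yaml.v2-style Unmarshaler can be written with `types.OptionalBool`; the exact body may differ from the repository's implementation.

```go
package syncsketch

import "github.com/containers/image/v5/types"

// tlsVerifyConfig holds a tri-state value: unset (default, verify),
// explicitly enabled, or explicitly disabled.
type tlsVerifyConfig struct {
	skip types.OptionalBool // skip TLS verification check (false by default)
}

// UnmarshalYAML reads `tls-verify` as a boolean meaning "verify". If the key
// is missing this method is never called, so skip stays
// types.OptionalBoolUndefined and verification remains enforced.
func (tls *tlsVerifyConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var verify bool
	if err := unmarshal(&verify); err != nil {
		return err
	}
	tls.skip = types.NewOptionalBool(!verify)
	return nil
}
```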
@@ -240,7 +230,7 @@ func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]
return sourceReferences, nil
}
// imagesToCopyFromDir builds a list of image references from the images found
// imagesTopCopyFromDir builds a list of image references from the images found
// in the source directory.
// It returns an image reference slice with as many elements as the images found
// and any error encountered.
@@ -270,11 +260,11 @@ func imagesToCopyFromDir(dirPath string) ([]types.ImageReference, error) {
return sourceReferences, nil
}
// imagesToCopyFromRegistry builds a list of repository descriptors from the images
// imagesTopCopyFromDir builds a list of repository descriptors from the images
// in a registry configuration.
// It returns a repository descriptors slice with as many elements as the images
// found and any error encountered. Each element of the slice is a list of
// image references, to be used as sync source.
// tagged image references, to be used as sync source.
func imagesToCopyFromRegistry(registryName string, cfg registrySyncConfig, sourceCtx types.SystemContext) ([]repoDescriptor, error) {
serverCtx := &sourceCtx
// override ctx with per-registryName options
@@ -282,11 +272,10 @@ func imagesToCopyFromRegistry(registryName string, cfg registrySyncConfig, sourc
serverCtx.DockerDaemonCertPath = cfg.CertDir
serverCtx.DockerDaemonInsecureSkipTLSVerify = (cfg.TLSVerify.skip == types.OptionalBoolTrue)
serverCtx.DockerInsecureSkipTLSVerify = cfg.TLSVerify.skip
if cfg.Credentials != (types.DockerAuthConfig{}) {
serverCtx.DockerAuthConfig = &cfg.Credentials
}
serverCtx.DockerAuthConfig = &cfg.Credentials
var repoDescList []repoDescriptor
for imageName, refs := range cfg.Images {
for imageName, tags := range cfg.Images {
repoLogger := logrus.WithFields(logrus.Fields{
"repo": imageName,
"registry": registryName,
@@ -301,37 +290,24 @@ func imagesToCopyFromRegistry(registryName string, cfg registrySyncConfig, sourc
repoLogger.Info("Processing repo")
var sourceReferences []types.ImageReference
if len(refs) != 0 {
for _, ref := range refs {
tagLogger := logrus.WithFields(logrus.Fields{"ref": ref})
var named reference.Named
// first try as digest
if d, err := digest.Parse(ref); err == nil {
named, err = reference.WithDigest(repoRef, d)
if err != nil {
tagLogger.Error("Error processing ref, skipping")
logrus.Error(err)
continue
}
} else {
tagLogger.Debugf("Ref was not a digest, trying as a tag: %s", err)
named, err = reference.WithTag(repoRef, ref)
if err != nil {
tagLogger.Error("Error parsing ref, skipping")
logrus.Error(err)
continue
}
}
imageRef, err := docker.NewReference(named)
if len(tags) != 0 {
for _, tag := range tags {
tagLogger := logrus.WithFields(logrus.Fields{"tag": tag})
taggedRef, err := reference.WithTag(repoRef, tag)
if err != nil {
tagLogger.Error("Error processing ref, skipping")
tagLogger.Error("Error parsing tag, skipping")
logrus.Error(err)
continue
}
imageRef, err := docker.NewReference(taggedRef)
if err != nil {
tagLogger.Error("Error processing tag, skipping")
logrus.Errorf("Error getting image reference: %s", err)
continue
}
sourceReferences = append(sourceReferences, imageRef)
}
} else { // len(refs) == 0
} else { // len(tags) == 0
repoLogger.Info("Querying registry for image tags")
sourceReferences, err = imagesToCopyFromRepo(serverCtx, repoRef)
if err != nil {
@@ -342,12 +318,12 @@ func imagesToCopyFromRegistry(registryName string, cfg registrySyncConfig, sourc
}
if len(sourceReferences) == 0 {
repoLogger.Warnf("No refs to sync found")
repoLogger.Warnf("No tags to sync found")
continue
}
repoDescList = append(repoDescList, repoDescriptor{
ImageRefs: sourceReferences,
Context: serverCtx})
TaggedImages: sourceReferences,
Context: serverCtx})
}
for imageName, tagRegex := range cfg.ImagesByTagRegex {
@@ -396,12 +372,12 @@ func imagesToCopyFromRegistry(registryName string, cfg registrySyncConfig, sourc
}
if len(sourceReferences) == 0 {
repoLogger.Warnf("No refs to sync found")
repoLogger.Warnf("No tags to sync found")
continue
}
repoDescList = append(repoDescList, repoDescriptor{
ImageRefs: sourceReferences,
Context: serverCtx})
TaggedImages: sourceReferences,
Context: serverCtx})
}
return repoDescList, nil
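The per-reference handling earlier in this function, where each entry listed under Images is tried first as a digest and only then as a tag, can be sketched in isolation as follows (refToNamed and the sample repository are illustrative; only the go-digest and containers/image reference APIs already used by this file are assumed):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
	digest "github.com/opencontainers/go-digest"
)

// refToNamed resolves one raw ref string against a repository: digests are
// tried first, anything that does not parse as a digest is treated as a tag.
func refToNamed(repo reference.Named, ref string) (reference.Named, error) {
	if d, err := digest.Parse(ref); err == nil {
		return reference.WithDigest(repo, d)
	}
	return reference.WithTag(repo, ref)
}

func main() {
	repo, err := reference.ParseNormalizedNamed("registry.example.com/redis")
	if err != nil {
		panic(err)
	}
	refs := []string{
		"1.0",
		"sha256:0000000000000000000000000000000011111111111111111111111111111111",
	}
	for _, r := range refs {
		named, err := refToNamed(repo, r)
		if err != nil {
			fmt.Println("skipping", r, ":", err)
			continue
		}
		fmt.Println(named.String())
	}
}
```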
@@ -434,13 +410,13 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
if err != nil {
return nil, errors.Wrapf(err, "Cannot obtain a valid image reference for transport %q and reference %q", docker.Transport.Name(), named.String())
}
desc.ImageRefs = []types.ImageReference{srcRef}
desc.TaggedImages = []types.ImageReference{srcRef}
} else {
desc.ImageRefs, err = imagesToCopyFromRepo(sourceCtx, named)
desc.TaggedImages, err = imagesToCopyFromRepo(sourceCtx, named)
if err != nil {
return descriptors, err
}
if len(desc.ImageRefs) == 0 {
if len(desc.TaggedImages) == 0 {
return descriptors, errors.Errorf("No images to sync found in %q", source)
}
}
@@ -456,11 +432,11 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContex
}
desc.DirBasePath = source
var err error
desc.ImageRefs, err = imagesToCopyFromDir(source)
desc.TaggedImages, err = imagesToCopyFromDir(source)
if err != nil {
return descriptors, err
}
if len(desc.ImageRefs) == 0 {
if len(desc.TaggedImages) == 0 {
return descriptors, errors.Errorf("No images to sync found in %q", source)
}
descriptors = append(descriptors, desc)
@@ -528,33 +504,14 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
return errors.New("sync from 'dir' to 'dir' not implemented, consider using rsync instead")
}
imageListSelection := copy.CopySystemImage
if opts.all {
imageListSelection = copy.CopyAllImages
}
sourceCtx, err := opts.srcImage.newSystemContext()
if err != nil {
return err
}
var manifestType string
if opts.format.present {
manifestType, err = parseManifestFormat(opts.format.value)
if err != nil {
return err
}
}
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
sourceArg := args[0]
var srcRepoList []repoDescriptor
if err = retry.RetryIfNecessary(ctx, func() error {
srcRepoList, err = imagesToCopy(sourceArg, opts.source, sourceCtx)
return err
}, opts.retryOpts); err != nil {
srcRepoList, err := imagesToCopy(sourceArg, opts.source, sourceCtx)
if err != nil {
return err
}
@@ -564,20 +521,20 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
return err
}
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
imagesNumber := 0
options := copy.Options{
RemoveSignatures: opts.removeSignatures,
SignBy: opts.signByFingerprint,
ReportWriter: os.Stdout,
DestinationCtx: destinationCtx,
ImageListSelection: imageListSelection,
OptimizeDestinationImageAlreadyExists: true,
ForceManifestMIMEType: manifestType,
RemoveSignatures: opts.removeSignatures,
SignBy: opts.signByFingerprint,
ReportWriter: os.Stdout,
DestinationCtx: destinationCtx,
}
for _, srcRepo := range srcRepoList {
options.SourceCtx = srcRepo.Context
for counter, ref := range srcRepo.ImageRefs {
for counter, ref := range srcRepo.TaggedImages {
var destSuffix string
switch ref.Transport() {
case docker.Transport:
@@ -604,13 +561,11 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
logrus.WithFields(logrus.Fields{
"from": transports.ImageName(ref),
"to": transports.ImageName(destRef),
}).Infof("Copying image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
}).Infof("Copying image tag %d/%d", counter+1, len(srcRepo.TaggedImages))
if err = retry.RetryIfNecessary(ctx, func() error {
_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error copying ref %q", transports.ImageName(ref))
_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
if err != nil {
return errors.Wrapf(err, "Error copying tag %q", transports.ImageName(ref))
}
imagesNumber++
}
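One side of this hunk wraps the copy.Image call in retry.RetryIfNecessary from containers/common. A standalone sketch of that pattern (doWithRetry and the no-op operation are illustrative; a real caller would run copy.Image inside the closure):

```go
package main

import (
	"context"
	"log"

	"github.com/containers/common/pkg/retry"
)

// doWithRetry re-runs op up to maxRetry extra times when it fails with an
// error that the retry package considers retriable.
func doWithRetry(ctx context.Context, maxRetry int, op func() error) error {
	opts := retry.RetryOptions{MaxRetry: maxRetry}
	return retry.RetryIfNecessary(ctx, op, &opts)
}

func main() {
	attempts := 0
	err := doWithRetry(context.Background(), 3, func() error {
		attempts++
		log.Printf("attempt %d", attempts)
		return nil // a real caller would run copy.Image or imagesToCopy here
	})
	if err != nil {
		log.Fatal(err)
	}
}
```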


@@ -2,17 +2,13 @@ package main
import (
"context"
"fmt"
"io"
"os"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -60,7 +56,6 @@ type dockerImageOptions struct {
shared *sharedImageOptions // May be shared across several imageOptions instances.
authFilePath optionalString // Path to a */containers/auth.json (prefixed version to override shared image option).
credsOption optionalString // username[:password] for accessing a registry
registryToken optionalString // token to be used directly as a Bearer token when accessing the registry
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
noCreds bool // Access the registry anonymously
@@ -96,7 +91,6 @@ func dockerImageFlags(global *globalOptions, shared *sharedImageOptions, flagPre
f := fs.VarPF(newOptionalStringValue(&flags.credsOption), credsOptionAlias, "", "Use `USERNAME[:PASSWORD]` for accessing the registry")
f.Hidden = true
}
fs.Var(newOptionalStringValue(&flags.registryToken), flagPrefix+"registry-token", "Provide a Bearer token for accessing the registry")
fs.StringVar(&flags.dockerCertPath, flagPrefix+"cert-dir", "", "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry or daemon")
optionalBoolFlag(&fs, &flags.tlsVerify, flagPrefix+"tls-verify", "require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)")
fs.BoolVar(&flags.noCreds, flagPrefix+"no-creds", false, "Access the registry anonymously")
@@ -114,17 +108,6 @@ func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, c
return fs, opts
}
type retryOptions struct {
maxRetry int // The number of times to possibly retry
}
func retryFlags() (pflag.FlagSet, *retry.RetryOptions) {
opts := retry.RetryOptions{}
fs := pflag.FlagSet{}
fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
return fs, &opts
}
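As a hedged sketch of how a helper like retryFlags is typically wired into a command (mirroring the AddFlagSet calls in syncCmd earlier in this diff; the demo command and argument values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/containers/common/pkg/retry"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

// retryFlags mirrors the helper shown in this hunk: it returns a flag set and
// the options struct the --retry-times flag writes into.
func retryFlags() (pflag.FlagSet, *retry.RetryOptions) {
	opts := retry.RetryOptions{}
	fs := pflag.FlagSet{}
	fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
	return fs, &opts
}

func main() {
	fs, opts := retryFlags()
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("max retries:", opts.MaxRetry)
			return nil
		},
	}
	cmd.Flags().AddFlagSet(&fs)
	cmd.SetArgs([]string{"--retry-times", "3"})
	_ = cmd.Execute()
}
```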
// newSystemContext returns a *types.SystemContext corresponding to opts.
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
@@ -155,9 +138,6 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
return nil, err
}
}
if opts.registryToken.present {
ctx.DockerBearerRegistryToken = opts.registryToken.value
}
if opts.noCreds {
ctx.DockerAuthConfig = &types.DockerAuthConfig{}
}
@@ -165,7 +145,7 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
return ctx, nil
}
// imageDestOptions is a superset of imageOptions specialized for image destinations.
// imageDestOptions is a superset of imageOptions specialized for iamge destinations.
type imageDestOptions struct {
*imageOptions
dirForceCompression bool // Compress layers when saving to the dir: transport
@@ -249,21 +229,6 @@ func parseImageSource(ctx context.Context, opts *imageOptions, name string) (typ
return ref.NewImageSource(ctx, sys)
}
// parseManifestFormat parses format parameter for copy and sync command.
// It returns string value to use as manifest MIME type
func parseManifestFormat(manifestFormat string) (string, error) {
switch manifestFormat {
case "oci":
return imgspecv1.MediaTypeImageManifest, nil
case "v2s1":
return manifest.DockerV2Schema1SignedMediaType, nil
case "v2s2":
return manifest.DockerV2Schema2MediaType, nil
default:
return "", fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", manifestFormat)
}
}
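For reference, a small standalone usage sketch of the parseManifestFormat helper shown in this hunk; the function body is reproduced from the hunk so the example runs on its own:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// parseManifestFormat maps a --format value to a manifest MIME type
// (reproduced from the hunk above).
func parseManifestFormat(manifestFormat string) (string, error) {
	switch manifestFormat {
	case "oci":
		return imgspecv1.MediaTypeImageManifest, nil
	case "v2s1":
		return manifest.DockerV2Schema1SignedMediaType, nil
	case "v2s2":
		return manifest.DockerV2Schema2MediaType, nil
	default:
		return "", fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", manifestFormat)
	}
}

func main() {
	for _, f := range []string{"oci", "v2s1", "v2s2", "badValue"} {
		mt, err := parseManifestFormat(f)
		if err != nil {
			fmt.Println(f, "->", err)
			continue
		}
		fmt.Println(f, "->", mt)
	}
}
```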
// usageTemplate returns the usage template for skopeo commands
// This blocks the displaying of the global options. The main skopeo
// command should not use this.


@@ -4,9 +4,7 @@ import (
"os"
"testing"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -39,9 +37,7 @@ func TestImageOptionsNewSystemContext(t *testing.T) {
opts := fakeImageOptions(t, "dest-", []string{}, []string{})
res, err := opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, &types.SystemContext{
DockerRegistryUserAgent: defaultUserAgent,
}, res)
assert.Equal(t, &types.SystemContext{}, res)
// Set everything to non-default values.
opts = fakeImageOptions(t, "dest-", []string{
@@ -58,7 +54,6 @@ func TestImageOptionsNewSystemContext(t *testing.T) {
"--dest-daemon-host", "daemon-host.example.com",
"--dest-tls-verify=false",
"--dest-creds", "creds-user:creds-password",
"--dest-registry-token", "faketoken",
})
res, err = opts.newSystemContext()
require.NoError(t, err)
@@ -72,11 +67,9 @@ func TestImageOptionsNewSystemContext(t *testing.T) {
DockerCertPath: "/srv/cert-dir",
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
DockerBearerRegistryToken: "faketoken",
DockerDaemonCertPath: "/srv/cert-dir",
DockerDaemonHost: "daemon-host.example.com",
DockerDaemonInsecureSkipTLSVerify: true,
DockerRegistryUserAgent: defaultUserAgent,
BigFilesTemporaryDir: "/srv",
}, res)
@@ -134,9 +127,7 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
opts := fakeImageDestOptions(t, "dest-", []string{}, []string{})
res, err := opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, &types.SystemContext{
DockerRegistryUserAgent: defaultUserAgent,
}, res)
assert.Equal(t, &types.SystemContext{}, res)
oldXRD, hasXRD := os.LookupEnv("REGISTRY_AUTH_FILE")
defer func() {
@@ -156,10 +147,7 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
})
res, err = opts.newSystemContext()
require.NoError(t, err)
assert.Equal(t, &types.SystemContext{
AuthFilePath: authFile,
DockerRegistryUserAgent: defaultUserAgent,
}, res)
assert.Equal(t, &types.SystemContext{AuthFilePath: authFile}, res)
// Set everything to non-default values.
opts = fakeImageDestOptions(t, "dest-", []string{
@@ -176,7 +164,6 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
"--dest-daemon-host", "daemon-host.example.com",
"--dest-tls-verify=false",
"--dest-creds", "creds-user:creds-password",
"--dest-registry-token", "faketoken",
})
res, err = opts.newSystemContext()
require.NoError(t, err)
@@ -190,11 +177,9 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
DockerCertPath: "/srv/cert-dir",
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
DockerBearerRegistryToken: "faketoken",
DockerDaemonCertPath: "/srv/cert-dir",
DockerDaemonHost: "daemon-host.example.com",
DockerDaemonInsecureSkipTLSVerify: true,
DockerRegistryUserAgent: defaultUserAgent,
DirForceCompress: true,
BigFilesTemporaryDir: "/srv",
}, res)
@@ -205,38 +190,6 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
assert.Error(t, err)
}
func TestParseManifestFormat(t *testing.T) {
for _, testCase := range []struct {
formatParam string
expectedManifestType string
expectErr bool
}{
{"oci",
imgspecv1.MediaTypeImageManifest,
false},
{"v2s1",
manifest.DockerV2Schema1SignedMediaType,
false},
{"v2s2",
manifest.DockerV2Schema2MediaType,
false},
{"",
"",
true},
{"badValue",
"",
true},
} {
manifestType, err := parseManifestFormat(testCase.formatParam)
if testCase.expectErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
assert.Equal(t, manifestType, testCase.expectedManifestType)
}
}
// since there is a shared authfile image option and a non-shared (prefixed) one, make sure the override logic
// works correctly.
func TestImageOptionsAuthfileOverride(t *testing.T) {
@@ -276,8 +229,7 @@ func TestImageOptionsAuthfileOverride(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, &types.SystemContext{
AuthFilePath: testCase.expectedAuthfilePath,
DockerRegistryUserAgent: defaultUserAgent,
AuthFilePath: testCase.expectedAuthfilePath,
}, res)
}
}


@@ -49,8 +49,6 @@ _skopeo_copy() {
--dest-tls-verify
--src-daemon-host
--dest-daemon-host
--src-registry-token
--dest-registry-token
"
local boolean_options="
@@ -70,50 +68,11 @@ _skopeo_copy() {
_complete_ "$options_with_args" "$boolean_options" "$transports"
}
_skopeo_sync() {
local options_with_args="
--authfile
--dest
--dest-authfile
--dest-cert-
--dest-creds
--dest-registry-token string
--format
--retry-times
--sign-by
--src
--src-authfile
--src-cert-dir
--src-creds
--src-registry-token
"
local boolean_options="
--all
--dest-no-creds
--dest-tls-verify
--remove-signatures
--scoped
--src-no-creds
--src-tls-verify
"
local transports
transports="
$(_skopeo_supported_transports "${FUNCNAME//"_skopeo_"/}")
"
_complete_ "$options_with_args" "$boolean_options" "$transports"
}
_skopeo_inspect() {
local options_with_args="
--authfile
--creds
--cert-dir
--format
--retry-times
--registry-token
"
local boolean_options="
--config
@@ -160,7 +119,6 @@ _skopeo_delete() {
--authfile
--creds
--cert-dir
--registry-token
"
local boolean_options="
--tls-verify
@@ -177,14 +135,11 @@ _skopeo_delete() {
_skopeo_layers() {
local options_with_args="
--authfile
--creds
--cert-dir
--registry-token
"
local boolean_options="
--tls-verify
--no-creds
"
_complete_ "$options_with_args" "$boolean_options"
}
@@ -194,7 +149,6 @@ _skopeo_list_repository_tags() {
--authfile
--creds
--cert-dir
--registry-token
"
local boolean_options="
@@ -232,7 +186,7 @@ _skopeo_logout() {
}
_skopeo_skopeo() {
# XXX: Changes here need to be reflected in the manually expanded
# XXX: Changes here need to be refleceted in the manually expanded
# string in the `case` statement below as well.
local options_with_args="
--policy
@@ -266,7 +220,7 @@ _skopeo_skopeo() {
)
case "$prev" in
# XXX: Changes here need to be reflected in $options_with_args as well.
# XXX: Changes here need to be refleceted in $options_with_args as well.
--policy|--registries.d|--override-arch|--override-os|--override-variant|--command-timeout)
return
;;
@@ -296,7 +250,7 @@ _cli_bash_autocomplete() {
local counter=1
while [ $counter -lt "$cword" ]; do
case "${words[$counter]}" in
skopeo|copy|sync|inspect|delete|manifest-digest|standalone-sign|standalone-verify|help|h|list-repository-tags)
skopeo|copy|inspect|delete|manifest-digest|standalone-sign|standalone-verify|help|h|list-repository-tags)
command="${words[$counter]//-/_}"
cpos=$counter
(( cpos++ ))


@@ -1,91 +0,0 @@
#!/bin/bash
# This script is intended to be executed by automation or humans
# under a hack/get_ci_vm.sh context. Use under any other circumstances
# is unlikely to function.
set -e
if [[ -r "/etc/automation_environment" ]]; then
source /etc/automation_environment
source $AUTOMATION_LIB_PATH/common_lib.sh
else
(
echo "WARNING: It does not appear that containers/automation was installed."
echo " Functionality of most of ${BASH_SOURCE[0]} will be negatively"
echo " impacted."
) > /dev/stderr
fi
OS_RELEASE_ID="$(source /etc/os-release; echo $ID)"
# GCE image-name compatible string representation of distribution _major_ version
OS_RELEASE_VER="$(source /etc/os-release; echo $VERSION_ID | tr -d '.')"
# Combined to ease some usage
OS_REL_VER="${OS_RELEASE_ID}-${OS_RELEASE_VER}"
export "PATH=$PATH:$GOPATH/bin"
podmanmake() {
req_env_vars GOPATH SKOPEO_PATH SKOPEO_CI_CONTAINER_FQIN
warn "Accumulated technical-debt requires execution inside a --privileged container. This is very likely hiding bugs!"
showrun podman run -it --rm --privileged \
-e GOPATH=$GOPATH \
-v $GOPATH:$GOPATH:Z \
-w $SKOPEO_PATH \
$SKOPEO_CI_CONTAINER_FQIN \
make "$@"
}
_run_setup() {
if [[ "$OS_RELEASE_ID" == "fedora" ]]; then
# This is required as part of the standard Fedora VM setup
growpart /dev/sda 1
resize2fs /dev/sda1
# VM's come with the distro. skopeo pre-installed
dnf erase -y skopeo
else
die "Unknown/unsupported distro. $OS_REL_VER"
fi
}
_run_vendor() {
podmanmake vendor BUILDTAGS="$BUILDTAGS"
}
_run_build() {
podmanmake bin/skopeo BUILDTAGS="$BUILDTAGS"
}
_run_validate() {
podmanmake validate-local BUILDTAGS="$BUILDTAGS"
}
_run_unit() {
podmanmake test-integration-local BUILDTAGS="$BUILDTAGS"
}
_run_integration() {
podmanmake test-integration-local BUILDTAGS="$BUILDTAGS"
}
_run_system() {
# Ensure we start with a clean-slate
podman system reset --force
# Executes with containers required for testing.
showrun make test-system-local BUILDTAGS="$BUILDTAGS"
}
req_env_vars SKOPEO_PATH BUILDTAGS
handler="_run_${1}"
if [ "$(type -t $handler)" != "function" ]; then
die "Unknown/Unsupported command-line argument '$1'"
fi
msg "************************************************************"
msg "Runner executing $1 on $OS_REL_VER"
msg "************************************************************"
cd "$SKOPEO_PATH"
$handler


@@ -16,16 +16,9 @@ using the latest Fedora and then Skopeo is installed into them:
* quay.io/skopeo/upstream - This image is built using the latest code found in this GitHub repository. When someone creates a commit and pushes it, the image is created. Due to that the image changes frequently and is not guaranteed to be stable. Built with skopeoimage/upstream/Dockerfile.
* quay.io/skopeo/testing - This image is built using the latest version of Skopeo that is or was in updates testing for Fedora. At times this may be the same as the stable image. This container image will primarily be used by the development teams for verification testing when a new package is created. Built with skopeoimage/testing/Dockerfile.
## Multiarch images
Multiarch images are available for Skopeo upstream and stable versions. Supported architectures are `amd64`, `s390x`, `ppc64le`.
Available images are `quay.io/skopeo/upstream:master`, `quay.io/skopeo/stable:v1.2.0`, `quay.io/containers/skopeo:v1.2.0`.
Images can be used the same way as in a single architecture case, no extra setup is required. For samples see next chapter.
## Sample Usage
Although not required, it is suggested that [Podman](https://github.com/containers/podman) be used with these container images.
Although not required, it is suggested that [Podman](https://github.com/containers/libpod) be used with these container images.
```
# Get Help on Skopeo


@@ -6,14 +6,14 @@
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:33
FROM registry.fedoraproject.org/fedora:32
# Don't include container-selinux and remove
# directories used by yum that are just taking
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --exclude container-selinux; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --exclude container-selinux; yum clean all; rm -rf /var/cache /var/log/dnf* /var/log/yum.*;
# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf
@@ -27,7 +27,7 @@ RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid
# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json
ENV REGISTRY_AUTH_FILE=/auth.json
# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]


@@ -7,14 +7,14 @@
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:33
FROM registry.fedoraproject.org/fedora:32
# Don't include container-selinux and remove
# directories used by yum that are just taking
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --enablerepo updates-testing --exclude container-selinux; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --enablerepo updates-testing --exclude container-selinux; yum clean all; rm -rf /var/cache /var/log/dnf* /var/log/yum.*;
# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf
@@ -28,7 +28,7 @@ RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid
# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json
ENV REGISTRY_AUTH_FILE=/auth.json
# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]


@@ -6,7 +6,7 @@
# This image can be used to create a secured container
# that runs safely with privileges within the container.
#
FROM registry.fedoraproject.org/fedora:33
FROM registry.fedoraproject.org/fedora:32
# Don't include container-selinux and remove
# directories used by yum that are just taking
@@ -29,12 +29,11 @@ mkdir /root/skopeo; \
git clone https://github.com/containers/skopeo /root/skopeo/src/github.com/containers/skopeo; \
export GOPATH=/root/skopeo; \
cd /root/skopeo/src/github.com/containers/skopeo; \
make bin/skopeo;\
make PREFIX=/usr install;\
make binary-local;\
make install;\
rm -rf /root/skopeo/*; \
yum -y remove git golang go-md2man make; \
yum -y clean all; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*
yum clean all; rm -rf /var/cache /var/log/dnf* /var/log/yum.*;
# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf
@@ -48,7 +47,7 @@ RUN echo skopeo:100000:65536 > /etc/subuid
RUN echo skopeo:100000:65536 > /etc/subgid
# Point to the Authorization file
ENV REGISTRY_AUTH_FILE=/tmp/auth.json
ENV REGISTRY_AUTH_FILE=/auth.json
# Set the entrypoint
ENTRYPOINT ["/usr/bin/skopeo"]


@@ -15,9 +15,6 @@ Uses the system's trust policy to validate images, rejects images not trusted by
_destination-image_ use the "image name" format described above
_source-image_ and _destination-image_ are interpreted completely independently; e.g. the destination name does not
automatically inherit any parts of the source name.
## OPTIONS
**--all**
@@ -42,10 +39,6 @@ Path of the authentication file for the source registry. Uses path given by `--a
Path of the authentication file for the destination registry. Uses path given by `--authfile`, if not provided.
**--digestfile** _path_
After copying the image, write the digest of the resulting image to the file.
**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)
**--quiet, -q** suppress output information when copying images
@@ -54,29 +47,29 @@ After copying the image, write the digest of the resulting image to the file.
**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_
**--encryption-key** _protocol:keyfile_ specifies the encryption protocol, which can be JWE (RFC7516), PGP (RFC4880), and PKCS7 (RFC2315) and the key material required for image encryption. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file.
**--encryption-key** _Key_ a reference prefixed with the encryption protocol to use. The supported protocols are JWE, PGP and PKCS7. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file. This feature is still *experimental*.
**--decryption-key** _key[:passphrase]_ to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, it is required to be passed in the argument and omitted otherwise.
**--decryption-key** _Key_ a reference required to perform decryption of container images. This should point to files which represent keys and/or certificates that can be used for decryption. Decryption will be tried with all keys. This feature is still *experimental*.
**--src-creds** _username[:password]_ for accessing the source registry.
**--src-creds** _username[:password]_ for accessing the source registry
**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source).
**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)
**--dest-oci-accept-uncompressed-layers** _bool-value_ Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed).
**--dest-oci-accept-uncompressed-layers** _bool-value_ Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed)
**--dest-creds** _username[:password]_ for accessing the destination registry.
**--dest-creds** _username[:password]_ for accessing the destination registry
**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon.
**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon
**--src-no-creds** _bool-value_ Access the registry anonymously.
**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true).
**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true)
**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon.
**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon
**--dest-no-creds** _bool-value_ Access the registry anonymously.
**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true).
**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true)
**--src-daemon-host** _host_ Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
@@ -88,17 +81,8 @@ Existing signatures, if any, are preserved as well.
**--dest-compress-level** _format_ Specifies the compression level to use. The value is specific to the compression algorithm used, e.g. for zstd the accepted values are in the range 1-20 (inclusive), while for gzip it is 1-9 (inclusive).
**--src-registry-token** _Bearer token_ for accessing the source registry.
**--dest-registry-token** _Bearer token_ for accessing the destination registry.
## EXAMPLES
To just copy an image from one registry to another:
```sh
$ skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest
```
To copy the layers of the docker.io busybox image to a local directory:
```sh
$ mkdir -p /var/lib/images/busybox


@@ -1,7 +1,7 @@
% skopeo-delete(1)
## NAME
skopeo\-delete - Mark the _image-name_ for later deletion by the registry's garbage collector.
skopeo\-delete - Mark _image-name_ for deletion.
## SYNOPSIS
**skopeo delete** _image-name_
@@ -24,18 +24,16 @@ $ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distrib
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--creds** _username[:password]_ for accessing the registry.
**--creds** _username[:password]_ for accessing the registry
**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry.
**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
**--no-creds** _bool-value_ Access the registry anonymously.
Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.
**--registry-token** _Bearer token_ for accessing the registry.
## EXAMPLES
Mark image example/pause for deletion from the registry.example.com registry:


@@ -1,61 +1,37 @@
% skopeo-inspect(1)
## NAME
skopeo\-inspect - Return low-level information about _image-name_ in a registry.
skopeo\-inspect - Return low-level information about _image-name_ in a registry
## SYNOPSIS
**skopeo inspect** [*options*] _image-name_
## DESCRIPTION
**skopeo inspect** [**--raw**] [**--config**] _image-name_
Return low-level information about _image-name_ in a registry
_image-name_ name of image to retrieve information about
**--raw** output raw manifest, default is to format in JSON
## OPTIONS
_image-name_ name of image to retrieve information about
**--authfile** _path_
**--config** output configuration in OCI format, default is to format in JSON
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
_image-name_ name of image to retrieve configuration for
**--cert-dir** _path_
**--config** **--raw** output configuration in raw format
Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.
_image-name_ name of image to retrieve configuration for
**--config**
**--authfile** _path_
Output configuration in OCI format, default is to format in JSON format.
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--creds** _username[:password]_
**--creds** _username[:password]_ for accessing the registry
Username and password for accessing the registry.
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry
**--format**, **-f**=*format*
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
Format the output using the given Go template.
The keys of the returned JSON can be used as the values for the --format flag (see examples below).
**--no-creds**
Access the registry anonymously.
**--raw**
Output raw manifest or config data depending on --config option.
The --format option is not supported with --raw option.
**--registry-token** _Bearer token_
Registry token for accessing the registry.
**--retry-times**
The number of times to retry; retry wait time will be exponentially increased based on the number of failed attempts.
**--tls-verify**
Require HTTPS and verify certificates when talking to container registries (defaults to true).
**--no-creds** _bool-value_ Access the registry anonymously.
## EXAMPLES
@@ -66,14 +42,14 @@ $ skopeo inspect docker://docker.io/fedora
"Name": "docker.io/library/fedora",
"Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
"RepoTags": [
"20",
"21",
"22",
"23",
"24",
"heisenbug",
"latest",
"rawhide"
"20",
"21",
"22",
"23",
"24",
"heisenbug",
"latest",
"rawhide"
],
"Created": "2016-06-20T19:33:43.220526898Z",
"DockerVersion": "1.10.3",
@@ -81,24 +57,15 @@ $ skopeo inspect docker://docker.io/fedora
"Architecture": "amd64",
"Os": "linux",
"Layers": [
"sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
"sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
]
}
```
```
$ /bin/skopeo inspect --config docker://registry.fedoraproject.org/fedora --format "{{ .Architecture }}"
amd64
```
```
$ /bin/skopeo inspect --format '{{ .Env }}' docker://registry.access.redhat.com/ubi8
[PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin container=oci]
```
# SEE ALSO
skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)
## AUTHORS
Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>


@@ -1,7 +1,7 @@
% skopeo-list-tags(1)
## NAME
skopeo\-list\-tags - Return a list of tags for the transport-specific image repository.
skopeo\-list\-tags - Return a list of tags the transport-specific image repository
## SYNOPSIS
**skopeo list-tags** _repository-name_
@@ -15,16 +15,14 @@ Return a list of tags from _repository-name_ in a registry.
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--creds** _username[:password]_ for accessing the registry.
**--creds** _username[:password]_ for accessing the registry
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
**--no-creds** _bool-value_ Access the registry anonymously.
**--registry-token** _Bearer token_ for accessing the registry.
## REPOSITORY NAMES
Repository names are transport-specific references as each transport may have its own concept of a "repository" and "tags". Currently, only the Docker transport is supported.


@@ -1,10 +1,10 @@
% skopeo-login(1)
## NAME
skopeo\-login - Login to a container registry.
skopeo\-login - Login to a container registry
## SYNOPSIS
**skopeo login** [*options*] *registry*
**skoepo login** [*options*] *registry*
## DESCRIPTION
**skopeo login** logs into a specified registry server with the correct username


@@ -1,7 +1,7 @@
% skopeo-logout(1)
## NAME
skopeo\-logout - Logout of a container registry.
skopeo\-logout - Logout of a container registry
## SYNOPSIS
**skopeo logout** [*options*] *registry*


@@ -1,7 +1,7 @@
% skopeo-manifest-digest(1)
## NAME
skopeo\-manifest\-digest - Compute a manifest digest for a manifest-file and write it to standard output.
skopeo\-manifest\-digest -Compute a manifest digest of manifest-file and write it to standard output.
## SYNOPSIS
**skopeo manifest-digest** _manifest-file_


@@ -1,7 +1,7 @@
% skopeo-standalone-sign(1)
## NAME
skopeo\-standalone-sign - Debugging tool - Publish and sign an image in one step.
skopeo\-standalone-sign - Simple Sign an image
## SYNOPSIS
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_


@@ -1,7 +1,7 @@
% skopeo-standalone-verify(1)
## NAME
skopeo\-standalone\-verify - Verify an image signature.
skopeo\-standalone\-verify - Verify an image signature
## SYNOPSIS
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_


@@ -32,11 +32,6 @@ When the `--scoped` option is specified, images are prefixed with the source ima
name can be stored at _destination_.
## OPTIONS
**--all**
If one of the images in __src__ refers to a list of images, instead of copying just the image which matches the current OS and
architecture (subject to the use of the global --override-os, --override-arch and --override-variant options), attempt to copy all of
the images in the list, and the list itself.
**--authfile** _path_
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
@@ -54,8 +49,6 @@ Path of the authentication file for the destination registry. Uses path given by
**--dest** _transport_ Destination transport.
**--format, -f** _manifest-type_ Manifest Type (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source).
**--scoped** Prefix images with the source image path, so that multiple images with the same name can be stored at _destination_.
**--remove-signatures** Do not copy signatures, if any, from _source-image_. This is necessary when copying a signed image to a destination which does not support signatures.
@@ -78,10 +71,6 @@ Path of the authentication file for the destination registry. Uses path given by
**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to a container destination registry or daemon (defaults to true).
**--src-registry-token** _Bearer token_ for accessing the source registry.
**--dest-registry-token** _Bearer token_ for accessing the destination registry.
## EXAMPLES
### Synchronizing to a local directory
@@ -154,7 +143,6 @@ registry.example.com:
redis:
- "1.0"
- "2.0"
- "sha256:0000000000000000000000000000000011111111111111111111111111111111"
images-by-tag-regex:
nginx: ^1\.13\.[12]-alpine-perl$
credentials:
@@ -174,14 +162,14 @@ skopeo sync --src yaml --dest docker sync.yml my-registry.local.lan/repo/
```
This will copy the following images:
- Repository `registry.example.com/busybox`: all images, as no tags are specified.
- Repository `registry.example.com/redis`: images tagged "1.0" and "2.0" along with image with digest "sha256:0000000000000000000000000000000011111111111111111111111111111111".
- Repository `registry.example.com/redis`: images tagged "1.0" and "2.0".
- Repository `registry.example.com/nginx`: images tagged "1.13.1-alpine-perl" and "1.13.2-alpine-perl".
- Repository `quay.io/coreos/etcd`: images tagged "latest".
For the registry `registry.example.com`, the "john"/"this is a secret" credentials are used, with server TLS certificates located at `/home/john/certs`.
TLS verification is normally enabled, and it can be disabled setting `tls-verify` to `false`.
In the above example, TLS verification is enabled for `registry.example.com`, while is
In the above example, TLS verification is enabled for `reigstry.example.com`, while is
disabled for `quay.io`.
## SEE ALSO


@@ -44,11 +44,6 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
**oci:**_path_**:**_tag_
An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_.
**oci-archive:**_path_**:**_tag_
An image _tag_ in a tar archive compliant with "Open Container Image Layout Specification" at _path_.
See [containers-transports(5)](https://github.com/containers/image/blob/master/docs/containers-transports.5.md) for details.
## OPTIONS
**--command-timeout** _duration_ Timeout for the command execution.

24  go.mod

@@ -3,23 +3,25 @@ module github.com/containers/skopeo
go 1.12
require (
github.com/containers/common v0.38.12
github.com/containers/image/v5 v5.12.0
github.com/containers/ocicrypt v1.1.1
github.com/containers/storage v1.31.3
github.com/docker/docker v20.10.6+incompatible
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/containers/common v0.14.0
github.com/containers/image/v5 v5.5.1
github.com/containers/ocicrypt v1.0.2
github.com/containers/storage v1.20.2
github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f
github.com/dsnet/compress v0.0.1 // indirect
github.com/go-check/check v0.0.0-20180628173108-788fd7840127
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/image-tools v0.0.0-20170926011501-6d941547fa1d
github.com/opencontainers/runtime-spec v1.0.0 // indirect
github.com/pkg/errors v0.9.1
github.com/russross/blackfriday v2.0.0+incompatible // indirect
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.1.3
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v1.0.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.7.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
github.com/stretchr/testify v1.6.1
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
go4.org v0.0.0-20190218023631-ce4c26f7be8e // indirect
gopkg.in/yaml.v2 v2.4.0
golang.org/x/text v0.3.3 // indirect
gopkg.in/yaml.v2 v2.3.0
)

920  go.sum

File diff suppressed because it is too large


@@ -1,61 +0,0 @@
#!/usr/bin/env bash
#
# For help and usage information, simply execute the script w/o any arguments.
#
# This script is intended to be run by Red Hat skopeo developers who need
# to debug problems specifically related to Cirrus-CI automated testing.
# It requires that you have been granted prior access to create VMs in
# google-cloud. For non-Red Hat contributors, VMs are available as-needed,
# with supervision upon request.
set -e
SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
# Help detect if we were called by get_ci_vm container
GET_CI_VM="${GET_CI_VM:-0}"
in_get_ci_vm() {
if ((GET_CI_VM==0)); then
echo "Error: $1 is not intended for use in this context"
exit 2
fi
}
# get_ci_vm APIv1 container entrypoint calls into this script
# to obtain required repo. specific configuration options.
if [[ "$1" == "--config" ]]; then
in_get_ci_vm "$1"
cat <<EOF
DESTDIR="/var/tmp/go/src/github.com/containers/skopeo"
UPSTREAM_REPO="https://github.com/containers/skopeo.git"
GCLOUD_PROJECT="skopeo"
GCLOUD_IMGPROJECT="libpod-218412"
GCLOUD_CFG="skopeo"
GCLOUD_ZONE="${GCLOUD_ZONE:-us-central1-f}"
GCLOUD_CPUS="2"
GCLOUD_MEMORY="4Gb"
GCLOUD_DISK="200"
EOF
elif [[ "$1" == "--setup" ]]; then
in_get_ci_vm "$1"
# get_ci_vm container entrypoint calls us with this option on the
# Cirrus-CI environment instance, to perform repo.-specific setup.
echo "+ Executing setup" > /dev/stderr
${GOSRC}/${SCRIPT_BASE}/runner.sh setup
else
# Create and access VM for specified Cirrus-CI task
mkdir -p $HOME/.config/gcloud/ssh
podman run -it --rm \
--tz=local \
-e NAME="$USER" \
-e SRCDIR=/src \
-e GCLOUD_ZONE="$GCLOUD_ZONE" \
-e DEBUG="${DEBUG:-0}" \
-v $REPO_DIRPATH:/src:O \
-v $HOME/.config/gcloud:/root/.config/gcloud:z \
-v $HOME/.config/gcloud/ssh:/root/.ssh:z \
quay.io/libpod/get_ci_vm:latest "$@"
fi


@@ -25,8 +25,12 @@ export MAKEDIR="$SCRIPTDIR/make"
# We're a nice, sexy, little shell script, and people might try to run us;
# but really, they shouldn't. We want to be in a container!
# The magic value is defined inside our Dockerfile.
if [[ "$container_magic" != "85531765-346b-4316-bdb8-358e4cca9e5d" ]]; then
inContainer="AssumeSoInitially"
if [ "$PWD" != "/go/src/$SKOPEO_PKG" ]; then
unset inContainer
fi
if [ -z "$inContainer" ]; then
{
echo "# WARNING! I don't seem to be running in a Docker container."
echo "# The result of this command might be an incorrect build, and will not be"
@@ -35,9 +39,6 @@ if [[ "$container_magic" != "85531765-346b-4316-bdb8-358e4cca9e5d" ]]; then
echo "# Try this instead: make all"
echo "#"
} >&2
else
echo "# I appear to be running inside my designated container image, good!"
export SKOPEO_CONTAINER_TESTS=1
fi
echo


@@ -5,7 +5,7 @@ if [ -z "$VALIDATE_UPSTREAM" ]; then
# are running more than one validate bundlescript
VALIDATE_REPO='https://github.com/containers/skopeo.git'
VALIDATE_BRANCH='main'
VALIDATE_BRANCH='master'
if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"

View File

@@ -8,7 +8,7 @@ bundle_test_integration() {
# subshell so that we can export PATH without breaking other things
(
make bin/skopeo ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make PREFIX=/usr install
make binary-local ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make install
bundle_test_integration
) 2>&1


@@ -11,8 +11,8 @@ sed -i \
/etc/containers/storage.conf
# Build skopeo, install into /usr/bin
make bin/skopeo ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make PREFIX=/usr install
make binary-local ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make install
# Run tests
SKOPEO_BINARY=/usr/bin/skopeo bats --tap systemtest

17  hack/travis_osx.sh  Executable file

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
set -e
export GOPATH=$(pwd)/_gopath
export PATH=$GOPATH/bin:$PATH
_containers="${GOPATH}/src/github.com/containers"
mkdir -vp ${_containers}
ln -vsf $(pwd) ${_containers}/skopeo
go version
GO111MODULE=off go get -u github.com/cpuguy83/go-md2man golang.org/x/lint/golint
cd ${_containers}/skopeo
make validate-local test-unit-local binary-local
sudo make install
skopeo -v


@@ -1,123 +1,76 @@
# Installing from packages
## Distribution Packages
`skopeo` may already be packaged in your distribution.
### Fedora
`skopeo` may already be packaged in your distribution, for example on
RHEL/CentOS ≥ 8 or Fedora you can install it using:
```sh
sudo dnf -y install skopeo
$ sudo dnf install skopeo
```
### RHEL/CentOS ≥ 8 and CentOS Stream
on RHEL/CentOS ≤ 7.x:
```sh
sudo dnf -y install skopeo
$ sudo yum install skopeo
```
Newer Skopeo releases may be available on the repositories provided by the
Kubic project. Beware, these may not be suitable for production environments.
on CentOS 8:
for openSUSE:
```sh
sudo dnf -y module disable container-tools
sudo dnf -y install 'dnf-command(copr)'
sudo dnf -y copr enable rhcontainerbot/container-selinux
sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8/devel:kubic:libcontainers:stable.repo
sudo dnf -y install skopeo
$ sudo zypper install skopeo
```
on CentOS 8 Stream:
on alpine:
```sh
sudo dnf -y module disable container-tools
sudo dnf -y install 'dnf-command(copr)'
sudo dnf -y copr enable rhcontainerbot/container-selinux
sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_8_Stream/devel:kubic:libcontainers:stable.repo
sudo dnf -y install skopeo
$ sudo apk add skopeo
```
### RHEL/CentOS ≤ 7.x
on macOS:
```sh
sudo yum -y install skopeo
$ brew install skopeo
```
### openSUSE
Debian (10 and newer including Raspbian) and Ubuntu (18.04 and newer): Packages
are available via the [Kubic][0] project repositories:
```sh
sudo zypper install skopeo
```
### Alpine
```sh
sudo apk add skopeo
```
### macOS
```sh
brew install skopeo
```
### Nix / NixOS
```sh
$ nix-env -i skopeo
```
### Debian
The skopeo package is available in
the [Bullseye (testing) branch](https://packages.debian.org/bullseye/skopeo), which
will be the next stable release (Debian 11) as well as Debian Unstable/Sid.
[0]: https://build.opensuse.org/project/show/devel:kubic:libcontainers:stable
```bash
# Debian Testing/Bullseye or Unstable/Sid
sudo apt-get update
sudo apt-get -y install skopeo
# Debian Unstable/Sid:
$ echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Unstable/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
$ wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_Unstable/Release.key -O- | sudo apt-key add -
```
### Raspberry Pi OS arm64 (beta)
Raspberry Pi OS uses the standard Debian's repositories,
so it is fully compatible with Debian's arm64 repository.
You can simply follow the [steps for Debian](#debian) to install Skopeo.
### Ubuntu
The skopeo package is available in the official repositories for Ubuntu 20.10
and newer.
```bash
# Ubuntu 20.10 and newer
sudo apt-get -y update
sudo apt-get -y install skopeo
# Debian Testing:
$ echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Testing/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
$ wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_Testing/Release.key -O- | sudo apt-key add -
```
If you would prefer newer (though not as well-tested) packages,
the [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/skopeo)
provides packages for active Ubuntu releases 20.04 and newer (it should also work with direct derivatives like Pop!\_OS).
Checkout the [Kubic project page](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/skopeo)
for a list of supported Ubuntu version and
architecture combinations. **NOTE:** The command `sudo apt-get -y upgrade`
maybe required in some cases if Skopeo cannot be installed without it.
The build sources for the Kubic packages can be found [here](https://gitlab.com/rhcontainerbot/skopeo/-/tree/debian/debian).
CAUTION: On Ubuntu 20.10 and newer, we highly recommend you use Buildah, Podman and Skopeo ONLY from EITHER the Kubic repo
OR the official Ubuntu repos. Mixing and matching may lead to unpredictable situations including installation conflicts.
```bash
. /etc/os-release
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add -
sudo apt-get update
sudo apt-get -y upgrade
sudo apt-get -y install skopeo
# Debian 10:
$ echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
$ wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_10/Release.key -O- | sudo apt-key add -
```
```bash
# Raspbian 10:
$ echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Raspbian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
$ wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Raspbian_10/Release.key -O- | sudo apt-key add -
```
```bash
# Ubuntu (18.04, 19.04 and 19.10):
$ . /etc/os-release
$ sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/x${NAME}_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"
$ wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/x${NAME}_${VERSION_ID}/Release.key -O- | sudo apt-key add -
```
```bash
$ sudo apt-get update -qq
$ sudo apt-get install skopeo
```
Otherwise, read on for building and installing it from source:
@@ -126,8 +79,6 @@ To build the `skopeo` binary you need at least Go 1.12.
There are two ways to build skopeo: in a container, or locally without a
container. Choose the one which better matches your needs and environment.
## Building from Source
### Building without a container
Building without a container requires a bit more manual work and setup in your
@@ -141,32 +92,49 @@ Install the necessary dependencies:
```bash
# Fedora:
sudo dnf install gpgme-devel libassuan-devel btrfs-progs-devel device-mapper-devel
$ sudo dnf install gpgme-devel libassuan-devel btrfs-progs-devel device-mapper-devel
```
```bash
# Ubuntu (`libbtrfs-dev` requires Ubuntu 18.10 and above):
sudo apt install libgpgme-dev libassuan-dev libbtrfs-dev libdevmapper-dev
$ sudo apt install libgpgme-dev libassuan-dev libbtrfs-dev libdevmapper-dev
```
```bash
# macOS:
brew install gpgme
$ brew install gpgme
```
```bash
# openSUSE:
sudo zypper install libgpgme-devel device-mapper-devel libbtrfs-devel glib2-devel
$ sudo zypper install libgpgme-devel device-mapper-devel libbtrfs-devel glib2-devel
```
Make sure to clone this repository in your `GOPATH` - otherwise compilation fails.
```bash
git clone https://github.com/containers/skopeo $GOPATH/src/github.com/containers/skopeo
cd $GOPATH/src/github.com/containers/skopeo && make bin/skopeo
$ git clone https://github.com/containers/skopeo $GOPATH/src/github.com/containers/skopeo
$ cd $GOPATH/src/github.com/containers/skopeo && make binary-local
```
By default the `make` command (make all) will build bin/skopeo and the documentation locally.
### Building in a container
Building in a container is simpler, but more restrictive:
- It requires the `podman` command and the ability to run Linux containers
- The created executable is a Linux executable, and depends on dynamic libraries
which may only be available only in a container of a similar Linux
distribution.
```bash
$ make binary # Or (make all) to also build documentation, see below.
```
To build a pure-Go static binary (disables devicemapper, btrfs, and gpgme):
```bash
$ make binary-static DISABLE_CGO=1
```
### Building documentation
@@ -174,31 +142,18 @@ To build the manual you will need go-md2man.
```bash
# Debian:
sudo apt-get install go-md2man
$ sudo apt-get install go-md2man
```
```bash
# Fedora:
sudo dnf install go-md2man
$ sudo dnf install go-md2man
```
Then
```bash
make docs
```
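The generated man pages can be previewed straight from the build tree before installing them; the path below is an assumption and may differ between versions:
```bash
# View the top-level man page produced by go-md2man (adjust the path if needed).
man -l docs/skopeo.1
```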
### Building in a container
Building in a container is simpler, but more restrictive:
- It requires the `podman` command and the ability to run Linux containers.
- The created executable is a Linux executable, and depends on dynamic libraries
which may be available only in a container of a similar Linux
distribution.
```bash
$ make binary
$ make docs
```
### Installation
@@ -206,5 +161,5 @@ $ make binary
Finally, after the binary and documentation is built:
```bash
sudo make install
$ sudo make install
```
@@ -102,7 +102,7 @@ func (s *SkopeoSuite) TestCopyWithLocalAuth(c *check.C) {
// copy to private registry using local authentication
imageName := fmt.Sprintf("docker://%s/busybox:mine", s.regV2WithAuth.url)
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://docker.io/library/busybox:latest", imageName)
// inspect from private registry
// inspec from private registry
assertSkopeoSucceeds(c, "", "inspect", "--tls-verify=false", imageName)
// logout from the registry
wanted = fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url)
@@ -31,7 +31,6 @@ const (
v2DockerRegistryURL = "localhost:5555" // Update also policy.json
v2s1DockerRegistryURL = "localhost:5556"
knownWindowsOnlyImage = "docker://mcr.microsoft.com/windows/nanoserver:1909"
knownListImage = "docker://registry.fedoraproject.org/fedora-minimal" // could have either ":latest" or "@sha256:..." appended
)
type CopySuite struct {
@@ -100,14 +99,14 @@ func (s *CopySuite) TestCopyWithManifestList(c *check.C) {
dir, err := ioutil.TempDir("", "copy-manifest-list")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir)
assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir)
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:latest", "dir:"+dir)
}
func (s *CopySuite) TestCopyAllWithManifestList(c *check.C) {
dir, err := ioutil.TempDir("", "copy-all-manifest-list")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir)
assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage, "dir:"+dir)
assertSkopeoSucceeds(c, "", "copy", "--all", "docker://estesp/busybox:latest", "dir:"+dir)
}
func (s *CopySuite) TestCopyAllWithManifestListRoundTrip(c *check.C) {
@@ -123,7 +122,7 @@ func (s *CopySuite) TestCopyAllWithManifestListRoundTrip(c *check.C) {
dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage, "oci:"+oci1)
assertSkopeoSucceeds(c, "", "copy", "--all", "docker://estesp/busybox:latest", "oci:"+oci1)
assertSkopeoSucceeds(c, "", "copy", "--all", "oci:"+oci1, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "--all", "dir:"+dir1, "oci:"+oci2)
assertSkopeoSucceeds(c, "", "copy", "--all", "oci:"+oci2, "dir:"+dir2)
@@ -145,9 +144,9 @@ func (s *CopySuite) TestCopyAllWithManifestListConverge(c *check.C) {
dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage, "oci:"+oci1)
assertSkopeoSucceeds(c, "", "copy", "--all", "docker://estesp/busybox:latest", "oci:"+oci1)
assertSkopeoSucceeds(c, "", "copy", "--all", "oci:"+oci1, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "--all", "--format", "oci", knownListImage, "dir:"+dir2)
assertSkopeoSucceeds(c, "", "copy", "--all", "--format", "oci", "docker://estesp/busybox:latest", "dir:"+dir2)
assertSkopeoSucceeds(c, "", "copy", "--all", "dir:"+dir2, "oci:"+oci2)
assertDirImagesAreEqual(c, dir1, dir2)
out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
@@ -167,9 +166,9 @@ func (s *CopySuite) TestCopyWithManifestListConverge(c *check.C) {
dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
assertSkopeoSucceeds(c, "", "copy", knownListImage, "oci:"+oci1)
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:latest", "oci:"+oci1)
assertSkopeoSucceeds(c, "", "copy", "--all", "oci:"+oci1, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "--format", "oci", knownListImage, "dir:"+dir2)
assertSkopeoSucceeds(c, "", "copy", "--format", "oci", "docker://estesp/busybox:latest", "dir:"+dir2)
assertSkopeoSucceeds(c, "", "copy", "--all", "dir:"+dir2, "oci:"+oci2)
assertDirImagesAreEqual(c, dir1, dir2)
out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
@@ -181,7 +180,7 @@ func (s *CopySuite) TestCopyAllWithManifestListStorageFails(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(storage)
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
assertSkopeoFails(c, `.*destination transport .* does not support copying multiple images as a group.*`, "copy", "--all", knownListImage, "containers-storage:"+storage+"test")
assertSkopeoFails(c, `.*destination transport .* does not support copying multiple images as a group.*`, "copy", "--all", "docker://estesp/busybox:latest", "containers-storage:"+storage+"test")
}
func (s *CopySuite) TestCopyWithManifestListStorage(c *check.C) {
@@ -195,8 +194,8 @@ func (s *CopySuite) TestCopyWithManifestListStorage(c *check.C) {
dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-dir")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
assertSkopeoSucceeds(c, "", "copy", knownListImage, "containers-storage:"+storage+"test")
assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:latest", "containers-storage:"+storage+"test")
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:latest", "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test", "dir:"+dir2)
runDecompressDirs(c, "", dir1, dir2)
assertDirImagesAreEqual(c, dir1, dir2)
@@ -213,9 +212,9 @@ func (s *CopySuite) TestCopyWithManifestListStorageMultiple(c *check.C) {
dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-multiple-dir")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
assertSkopeoSucceeds(c, "", "--override-arch", "amd64", "copy", knownListImage, "containers-storage:"+storage+"test")
assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", knownListImage, "containers-storage:"+storage+"test")
assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", knownListImage, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "--override-arch", "amd64", "copy", "docker://estesp/busybox:latest", "containers-storage:"+storage+"test")
assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", "docker://estesp/busybox:latest", "containers-storage:"+storage+"test")
assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", "docker://estesp/busybox:latest", "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test", "dir:"+dir2)
runDecompressDirs(c, "", dir1, dir2)
assertDirImagesAreEqual(c, dir1, dir2)
@@ -234,33 +233,18 @@ func (s *CopySuite) TestCopyWithManifestListDigest(c *check.C) {
oci2, err := ioutil.TempDir("", "copy-manifest-list-digest-oci")
c.Assert(err, check.IsNil)
defer os.RemoveAll(oci2)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
manifestDigest, err := manifest.Digest([]byte(m))
c.Assert(err, check.IsNil)
digest := manifestDigest.String()
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage+"@"+digest, "dir:"+dir2)
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox@"+digest, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "--all", "docker://estesp/busybox@"+digest, "dir:"+dir2)
assertSkopeoSucceeds(c, "", "copy", "dir:"+dir1, "oci:"+oci1)
assertSkopeoSucceeds(c, "", "copy", "dir:"+dir2, "oci:"+oci2)
out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
c.Assert(out, check.Equals, "")
}
func (s *CopySuite) TestCopyWithDigestfileOutput(c *check.C) {
tempdir, err := ioutil.TempDir("", "tempdir")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tempdir)
dir1, err := ioutil.TempDir("", "copy-manifest-list-digest-dir")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir1)
digestOutPath := filepath.Join(tempdir, "digest.txt")
assertSkopeoSucceeds(c, "", "copy", "--digestfile="+digestOutPath, knownListImage, "dir:"+dir1)
readDigest, err := ioutil.ReadFile(digestOutPath)
c.Assert(err, check.IsNil)
_, err = digest.Parse(string(readDigest))
c.Assert(err, check.IsNil)
}
func (s *CopySuite) TestCopyWithManifestListStorageDigest(c *check.C) {
storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest")
c.Assert(err, check.IsNil)
@@ -272,13 +256,13 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigest(c *check.C) {
dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
manifestDigest, err := manifest.Digest([]byte(m))
c.Assert(err, check.IsNil)
digest := manifestDigest.String()
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test@"+digest, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir2)
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox@"+digest, "dir:"+dir2)
runDecompressDirs(c, "", dir1, dir2)
assertDirImagesAreEqual(c, dir1, dir2)
}
@@ -294,13 +278,13 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArches(c *check
dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
manifestDigest, err := manifest.Digest([]byte(m))
c.Assert(err, check.IsNil)
digest := manifestDigest.String()
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test@"+digest, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir2)
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox@"+digest, "dir:"+dir2)
runDecompressDirs(c, "", dir1, dir2)
assertDirImagesAreEqual(c, dir1, dir2)
}
@@ -310,14 +294,14 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseLi
c.Assert(err, check.IsNil)
defer os.RemoveAll(storage)
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
manifestDigest, err := manifest.Digest([]byte(m))
c.Assert(err, check.IsNil)
digest := manifestDigest.String()
_, err = manifest.ListFromBlob([]byte(m), manifest.GuessMIMEType([]byte(m)))
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
@@ -332,7 +316,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUses
c.Assert(err, check.IsNil)
defer os.RemoveAll(storage)
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
manifestDigest, err := manifest.Digest([]byte(m))
c.Assert(err, check.IsNil)
digest := manifestDigest.String()
@@ -342,8 +326,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUses
c.Assert(err, check.IsNil)
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+arm64Instance.String(), "containers-storage:"+storage+"test@"+arm64Instance.String())
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", "docker://estesp/busybox@"+arm64Instance.String(), "containers-storage:"+storage+"test@"+arm64Instance.String())
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
var image1 imgspecv1.Image
err = json.Unmarshal([]byte(i1), &image1)
@@ -368,7 +352,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUse
c.Assert(err, check.IsNil)
defer os.RemoveAll(storage)
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
manifestDigest, err := manifest.Digest([]byte(m))
c.Assert(err, check.IsNil)
digest := manifestDigest.String()
@@ -378,8 +362,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUse
c.Assert(err, check.IsNil)
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", "docker://estesp/busybox@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String())
var image1 imgspecv1.Image
err = json.Unmarshal([]byte(i1), &image1)
@@ -404,7 +388,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUses
c.Assert(err, check.IsNil)
defer os.RemoveAll(storage)
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
manifestDigest, err := manifest.Digest([]byte(m))
c.Assert(err, check.IsNil)
digest := manifestDigest.String()
@@ -414,9 +398,9 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUses
c.Assert(err, check.IsNil)
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", "docker://estesp/busybox@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String())
var image1 imgspecv1.Image
@@ -440,7 +424,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDig
c.Assert(err, check.IsNil)
defer os.RemoveAll(storage)
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", "docker://estesp/busybox:latest")
manifestDigest, err := manifest.Digest([]byte(m))
c.Assert(err, check.IsNil)
digest := manifestDigest.String()
@@ -450,8 +434,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDig
c.Assert(err, check.IsNil)
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage, "containers-storage:"+storage+"test:latest")
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", "docker://estesp/busybox:latest", "containers-storage:"+storage+"test:latest")
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", "docker://estesp/busybox@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*error reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test:latest")
var image1 imgspecv1.Image
@@ -480,16 +464,16 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDig
c.Assert(image5.Architecture, check.Equals, "arm64")
}
func (s *CopySuite) TestCopyFailsWhenImageOSDoesNotMatchRuntimeOS(c *check.C) {
storage, err := ioutil.TempDir("", "copy-fails-image-does-not-match-runtime")
func (s *CopySuite) TestCopyFailsWhenImageOSDoesntMatchRuntimeOS(c *check.C) {
storage, err := ioutil.TempDir("", "copy-fails-image-doesnt-match-runtime")
c.Assert(err, check.IsNil)
defer os.RemoveAll(storage)
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
assertSkopeoFails(c, `.*no image found in manifest list for architecture .*, variant .*, OS .*`, "copy", knownWindowsOnlyImage, "containers-storage:"+storage+"test")
}
func (s *CopySuite) TestCopySucceedsWhenImageDoesNotMatchRuntimeButWeOverride(c *check.C) {
storage, err := ioutil.TempDir("", "copy-succeeds-image-does-not-match-runtime-but-override")
func (s *CopySuite) TestCopySucceedsWhenImageDoesntMatchRuntimeButWeOverride(c *check.C) {
storage, err := ioutil.TempDir("", "copy-succeeds-image-doesnt-match-runtime-but-override")
c.Assert(err, check.IsNil)
defer os.RemoveAll(storage)
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
@@ -504,7 +488,7 @@ func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// "pull": docker: → dir:
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
// "push": dir: → atomic:
@@ -525,30 +509,30 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// "pull": docker: → dir:
assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause", "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+dir1)
// "push": dir: → docker(v2s2):
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, ourRegistry+"pause:unsigned")
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, ourRegistry+"busybox:unsigned")
// The result of pushing and pulling is an unmodified image.
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", ourRegistry+"pause:unsigned", "dir:"+dir2)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", ourRegistry+"busybox:unsigned", "dir:"+dir2)
out := combinedOutputOfCommand(c, "diff", "-urN", dir1, dir2)
c.Assert(out, check.Equals, "")
// docker v2s2 -> OCI image layout with image name
// ociDest will be created by oci: if it doesn't exist
// so don't create it here to exercise auto-creation
ociDest := "pause-latest-image"
ociImgName := "pause"
ociDest := "busybox-latest-image"
ociImgName := "busybox"
defer os.RemoveAll(ociDest)
assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest+":"+ociImgName)
assertSkopeoSucceeds(c, "", "copy", "docker://busybox:latest", "oci:"+ociDest+":"+ociImgName)
_, err = os.Stat(ociDest)
c.Assert(err, check.IsNil)
// docker v2s2 -> OCI image layout without image name
ociDest = "pause-latest-noimage"
ociDest = "busybox-latest-noimage"
defer os.RemoveAll(ociDest)
assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest)
assertSkopeoSucceeds(c, "", "copy", "docker://busybox:latest", "oci:"+ociDest)
_, err = os.Stat(ociDest)
c.Assert(err, check.IsNil)
}
@@ -642,7 +626,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
// Since the image is partially encrypted we should find layers that aren't encrypted
matchLayerBlobBinaryType(c, partiallyEncryptedImgDir+"/blobs/sha256", "application/x-gzip", 2)
// Decrypt the partially encrypted image
// Decrypt the partically encrypted image
assertSkopeoSucceeds(c, "", "copy", "--decryption-key", keysDir+"/private.key",
"oci:"+partiallyEncryptedImgDir+":encrypted", "oci:"+partiallyDecryptedImgDir+":decrypted")
@@ -736,7 +720,7 @@ func (s *CopySuite) TestCopyStreaming(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// streaming: docker: → atomic:
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://estesp/busybox:amd64", "atomic:localhost:5000/myns/unsigned:streaming")
// Compare (copies of) the original and the copy:
@@ -817,7 +801,7 @@ func (s *CopySuite) TestCopySignatures(c *check.C) {
"--policy", policy, "copy", "docker://busybox:latest", dirDest)
// type: insecureAcceptAnything
assertSkopeoSucceeds(c, "", "--policy", policy, "copy", "docker://quay.io/openshift/origin-hello-openshift", dirDest)
assertSkopeoSucceeds(c, "", "--policy", policy, "copy", "docker://openshift/hello-openshift", dirDest)
// type: signedBy
// Sign the images
@@ -879,7 +863,7 @@ func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:armfh", topDirDest+"/dir1")
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:s390x", topDirDest+"/dir2")
// Sign the images. By coping from a topDirDest/dirN, also test that non-/restricted paths
// Sign the images. By coping fom a topDirDest/dirN, also test that non-/restricted paths
// use the dir:"" default of insecureAcceptAnything.
// (For signing, we must push to atomic: to get a Docker identity to use in the signature.)
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "copy", "--sign-by", "personal@example.com", topDirDest+"/dir1", "atomic:localhost:5000/myns/personal:dirstaging")
@@ -1083,25 +1067,6 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
assertDirImagesAreEqual(c, filepath.Join(topDir, "dirDA"), filepath.Join(topDir, "dirDD"))
}
// copyWithSignedIdentity creates a copy of an unsigned image, adding a signature for an unrelated identity
// This should be easier than using standalone-sign.
func copyWithSignedIdentity(c *check.C, src, dest, signedIdentity, signBy, registriesDir string) {
topDir, err := ioutil.TempDir("", "copyWithSignedIdentity")
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)
signingDir := filepath.Join(topDir, "signing-temp")
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", src, "dir:"+signingDir)
// Unknown error in Travis: https://github.com/containers/skopeo/issues/1093
// c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", filepath.Join(signingDir, "signature-1"),
filepath.Join(signingDir, "manifest.json"), signedIdentity, signBy)
// Unknown error in Travis: https://github.com/containers/skopeo/issues/1093
// c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--dest-tls-verify=false", "dir:"+signingDir, dest)
}
// Both mirroring support in registries.conf, and mirrored remapIdentity support in policy.json
func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
const regPrefix = "docker://localhost:5006/myns/mirroring-"
@@ -1112,7 +1077,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
c.Skip(fmt.Sprintf("Signing not supported: %v", err))
}
topDir, err := ioutil.TempDir("", "mirrored-signatures")
topDir, err := ioutil.TempDir("", "mirrored-signatures") // FIXME: Will this be used?
c.Assert(err, check.IsNil)
defer os.RemoveAll(topDir)
registriesDir := filepath.Join(topDir, "registries.d") // An empty directory to disable sigstore use
@@ -1122,7 +1087,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
defer os.Remove(policy)
// We use X-R-S-S for this testing to avoid having to deal with the sigstores.
// A downside is that OpenShift records signatures per image, so the error messages below
// A downside is that OpenShift records signatures per image, so the error messsages below
// list all signatures for other tags used for the same image as well.
// So, make sure to never create a signature that could be considered valid in a different part of the test (i.e. don't reuse tags).
@@ -1138,7 +1103,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
// Sign the image for the mirror
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by", "personal@example.com", regPrefix+"primary:unsigned", regPrefix+"mirror:mirror-signed")
// Verify that a correctly signed image for the mirror is accessible using the mirror's reference
// Verify that a correctly signed image for the mirror is acessible using the mirror's reference
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"mirror:mirror-signed", dirDest)
// … but verify that while it is accessible using the primary location redirecting to the mirror, …
assertSkopeoSucceeds(c, "" /* no --policy */, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:mirror-signed", dirDest)
@@ -1146,10 +1111,14 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted.*",
"--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:mirror-signed", dirDest)
// Create a signature for mirroring-primary:primary-signed without pushing there.
copyWithSignedIdentity(c, regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed",
"localhost:5006/myns/mirroring-primary:primary-signed", "personal@example.com",
registriesDir)
// Create a signature for mirroring-primary:primary-signed without pushing there. This should be easier than using standalone-sign.
signingDir := filepath.Join(topDir, "signing-temp")
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", regPrefix+"primary:unsigned", "dir:"+signingDir)
c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", filepath.Join(signingDir, "signature-1"),
filepath.Join(signingDir, "manifest.json"), "localhost:5006/myns/mirroring-primary:primary-signed", "personal@example.com")
c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--dest-tls-verify=false", "dir:"+signingDir, regPrefix+"mirror:primary-signed")
// Verify that a correctly signed image for the primary is accessible using the primary's reference
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:primary-signed", dirDest)
// … but verify that while it is accessible using the mirror location
@@ -1157,20 +1126,6 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
// … verify it is NOT accessible when requiring a signature.
assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted; Signature for identity localhost:5006/myns/mirroring-primary:primary-signed is not accepted.*",
"--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"mirror:primary-signed", dirDest)
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", "--dest-tls-verify=false", regPrefix+"primary:unsigned", regPrefix+"remap:remapped")
// Verify that while a remapIdentity image is accessible using the remapped (mirror) location
assertSkopeoSucceeds(c, "" /* no --policy */, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
// … it is NOT accessible when requiring a signature …
assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted; Signature for identity localhost:5006/myns/mirroring-primary:primary-signed is not accepted.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
// … until signed.
copyWithSignedIdentity(c, regPrefix+"remap:remapped", regPrefix+"remap:remapped",
"localhost:5006/myns/mirroring-primary:remapped", "personal@example.com",
registriesDir)
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
// To be extra clear about the semantics, verify that the signedPrefix (primary) location never exists
// and only the remapped prefix (mirror) is accessed.
assertSkopeoFails(c, ".*Error initializing source docker://localhost:5006/myns/mirroring-primary:remapped:.*manifest unknown: manifest unknown.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:remapped", dirDest)
}
func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
@@ -1190,7 +1145,7 @@ func (s *SkopeoSuite) TestCopySrcAndDestWithAuth(c *check.C) {
assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", "--dest-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), fmt.Sprintf("docker://%s/test:auth", s.regV2WithAuth.url))
}
func (s *CopySuite) TestCopyNoPanicOnHTTPResponseWithoutTLSVerifyFalse(c *check.C) {
func (s *CopySuite) TestCopyNoPanicOnHTTPResponseWOTLSVerifyFalse(c *check.C) {
const ourRegistry = "docker://" + v2DockerRegistryURL + "/"
// dir:test isn't created beforehand just because we already know this could
@@ -1266,6 +1221,14 @@ func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Regist
verifyManifestMIMEType(c, destDir, manifest.DockerV2Schema1SignedMediaType)
}
// Verify manifest in a dir: image at dir is expectedMIMEType.
func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
manifestBlob, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
c.Assert(err, check.IsNil)
mimeType := manifest.GuessMIMEType(manifestBlob)
c.Assert(mimeType, check.Equals, expectedMIMEType)
}
const regConfFixture = "./fixtures/registries.conf"
func (s *SkopeoSuite) TestSuccessCopySrcWithMirror(c *check.C) {
@@ -34,27 +34,10 @@
"keyPath": "@keydir@/personal-pubkey.gpg"
}
],
"localhost:5006/myns/mirroring-remap": [
{
"type": "signedBy",
"keyType": "GPGKeys",
"keyPath": "@keydir@/personal-pubkey.gpg",
"signedIdentity": {
"type": "remapIdentity",
"prefix": "localhost:5006/myns/mirroring-remap",
"signedPrefix": "localhost:5006/myns/mirroring-primary"
}
}
],
"docker.io/openshift": [
{
"type": "insecureAcceptAnything"
}
],
"quay.io/openshift": [
{
"type": "insecureAcceptAnything"
}
]
},
"dir": {

View File

@@ -19,7 +19,7 @@ to start a container, then within the container:
SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'
An example of what can be done within the container:
cd ..; make bin/skopeo PREFIX=/usr install
cd ..; make binary-local install
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://busybox:latest atomic:localhost:5000/myns/personal:personal
oc get istag personal:personal -o json
curl -L -v 'http://localhost:5000/v2/'
@@ -11,26 +11,8 @@ import (
"strings"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/go-check/check"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
// A repository with a path with multiple components in it which
// contains multiple tags, preferably with some tags pointing to
// manifest lists, and with some tags that don't.
pullableRepo = "k8s.gcr.io/coredns/coredns"
// A tagged image in the repository that we can inspect and copy.
pullableTaggedImage = "k8s.gcr.io/coredns/coredns:v1.6.6"
// A tagged manifest list in the repository that we can inspect and copy.
pullableTaggedManifestList = "k8s.gcr.io/coredns/coredns:v1.8.0"
// A repository containing multiple tags, some of which are for
// manifest lists, and which includes a "latest" tag. We specify the
// name here without a tag.
pullableRepoWithLatestTag = "k8s.gcr.io/pause"
)
func init() {
@@ -112,8 +94,8 @@ func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableTaggedImage
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
image := "busybox:latest"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
imagePath := imageRef.DockerReference().String()
@@ -135,37 +117,9 @@ func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
c.Assert(out, check.Equals, "")
}
func (s *SyncSuite) TestDocker2DirTaggedAll(c *check.C) {
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableTaggedManifestList
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
imagePath := imageRef.DockerReference().String()
dir1 := path.Join(tmpDir, "dir1")
dir2 := path.Join(tmpDir, "dir2")
// sync docker => dir
assertSkopeoSucceeds(c, "", "sync", "--all", "--scoped", "--src", "docker", "--dest", "dir", image, dir1)
_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
c.Assert(err, check.IsNil)
// copy docker => dir
assertSkopeoSucceeds(c, "", "copy", "--all", "docker://"+image, "dir:"+dir2)
_, err = os.Stat(path.Join(dir2, "manifest.json"))
c.Assert(err, check.IsNil)
out := combinedOutputOfCommand(c, "diff", "-urN", path.Join(dir1, imagePath), dir2)
c.Assert(out, check.Equals, "")
}
func (s *SyncSuite) TestScoped(c *check.C) {
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableTaggedImage
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
image := "busybox:latest"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
imagePath := imageRef.DockerReference().String()
@@ -173,7 +127,7 @@ func (s *SyncSuite) TestScoped(c *check.C) {
dir1, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1)
_, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json"))
_, err = os.Stat(path.Join(dir1, image, "manifest.json"))
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1)
@@ -184,30 +138,30 @@ func (s *SyncSuite) TestScoped(c *check.C) {
}
func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableRepoWithLatestTag
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
image := "busybox:latest"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
imagePath := imageRef.DockerReference().String()
// make a copy of the image in the local registry
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image, "docker://"+path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())))
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image, "docker://"+path.Join(v2DockerRegistryURL, image))
//sync upstream image to dir, not scoped
dir1, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1)
_, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json"))
_, err = os.Stat(path.Join(dir1, image, "manifest.json"))
c.Assert(err, check.IsNil)
//sync local registry image to dir, not scoped
assertSkopeoFails(c, ".*Refusing to overwrite destination directory.*", "sync", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())), dir1)
assertSkopeoFails(c, ".*Refusing to overwrite destination directory.*", "sync", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, image), dir1)
//sync local registry image to dir, scoped
imageRef, err = docker.ParseReference(fmt.Sprintf("//%s", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference()))))
imageRef, err = docker.ParseReference(fmt.Sprintf("//%s", path.Join(v2DockerRegistryURL, image)))
c.Assert(err, check.IsNil)
imagePath = imageRef.DockerReference().String()
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())), dir1)
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, image), dir1)
_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
c.Assert(err, check.IsNil)
os.RemoveAll(dir1)
@@ -219,8 +173,8 @@ func (s *SyncSuite) TestDocker2DirUntagged(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableRepo
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
image := "alpine"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
imagePath := imageRef.DockerReference().String()
@@ -244,7 +198,7 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {
defer os.RemoveAll(tmpDir)
dir1 := path.Join(tmpDir, "dir1")
image := pullableRepo
image := "alpine"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
imagePath := imageRef.DockerReference().Name()
@@ -255,22 +209,23 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {
c.Check(len(tags), check.Not(check.Equals), 0)
yamlConfig := fmt.Sprintf(`
%s:
docker.io:
images:
%s: []
`, reference.Domain(imageRef.DockerReference()), reference.Path(imageRef.DockerReference()))
%s:
`, image)
// sync to the local registry
//sync to the local reg
yamlFile := path.Join(tmpDir, "registries.yaml")
ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "docker", "--dest-tls-verify=false", yamlFile, v2DockerRegistryURL)
// sync back from local registry to a folder
// sync back from local reg to a folder
os.Remove(yamlFile)
yamlConfig = fmt.Sprintf(`
%s:
tls-verify: false
images:
%s: []
%s:
`, v2DockerRegistryURL, imagePath)
ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
@@ -279,9 +234,7 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {
sysCtx = types.SystemContext{
DockerInsecureSkipTLSVerify: types.NewOptionalBool(true),
}
localImageRef, err := docker.ParseReference(fmt.Sprintf("//%s/%s", v2DockerRegistryURL, imagePath))
c.Assert(err, check.IsNil)
localTags, err := docker.GetRepositoryTags(context.Background(), &sysCtx, localImageRef)
localTags, err := docker.GetRepositoryTags(context.Background(), &sysCtx, imageRef)
c.Assert(err, check.IsNil)
c.Check(len(localTags), check.Not(check.Equals), 0)
c.Assert(len(localTags), check.Equals, len(tags))
@@ -309,9 +262,9 @@ func (s *SyncSuite) TestYamlRegex2Dir(c *check.C) {
dir1 := path.Join(tmpDir, "dir1")
yamlConfig := `
k8s.gcr.io:
docker.io:
images-by-tag-regex:
pause: ^[12]\.0$ # regex string test
nginx: ^1\.13\.[12]-alpine-perl$ # regex string test
`
// the ↑ regex strings always matches only 2 images
var nTags = 2
@@ -336,37 +289,6 @@ k8s.gcr.io:
c.Assert(nManifests, check.Equals, nTags)
}
func (s *SyncSuite) TestYamlDigest2Dir(c *check.C) {
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
dir1 := path.Join(tmpDir, "dir1")
yamlConfig := `
k8s.gcr.io:
images:
pause:
- sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
`
yamlFile := path.Join(tmpDir, "registries.yaml")
ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
nManifests := 0
err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() && info.Name() == "manifest.json" {
nManifests++
return filepath.SkipDir
}
return nil
})
c.Assert(err, check.IsNil)
c.Assert(nManifests, check.Equals, 1)
}
func (s *SyncSuite) TestYaml2Dir(c *check.C) {
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
@@ -374,21 +296,21 @@ func (s *SyncSuite) TestYaml2Dir(c *check.C) {
dir1 := path.Join(tmpDir, "dir1")
yamlConfig := `
k8s.gcr.io:
docker.io:
images:
coredns/coredns:
- v1.8.0
- v1.7.1
k8s-dns-kube-dns:
- 1.14.12
- 1.14.13
pause:
busybox:
- latest
- musl
alpine:
- edge
- 3.8
opensuse/leap:
- latest
quay.io:
images:
quay/busybox:
- latest`
quay/busybox:
- latest`
// get the number of tags
re := regexp.MustCompile(`^ +- +[^:/ ]+`)
@@ -425,10 +347,10 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
dir1 := path.Join(tmpDir, "dir1")
image := pullableRepoWithLatestTag
image := "busybox"
tag := "latest"
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
// copy docker => docker
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image+":"+tag, localRegURL+image+":"+tag)
@@ -474,26 +396,6 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
}
func (s *SyncSuite) TestSyncManifestOutput(c *check.C) {
tmpDir, err := ioutil.TempDir("", "sync-manifest-output")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
destDir1 := filepath.Join(tmpDir, "dest1")
destDir2 := filepath.Join(tmpDir, "dest2")
destDir3 := filepath.Join(tmpDir, "dest3")
//Split image:tag path from image URI for manifest comparison
imageDir := pullableTaggedImage[strings.LastIndex(pullableTaggedImage, "/")+1:]
assertSkopeoSucceeds(c, "", "sync", "--format=oci", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir1)
verifyManifestMIMEType(c, filepath.Join(destDir1, imageDir), imgspecv1.MediaTypeImageManifest)
assertSkopeoSucceeds(c, "", "sync", "--format=v2s2", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir2)
verifyManifestMIMEType(c, filepath.Join(destDir2, imageDir), manifest.DockerV2Schema2MediaType)
assertSkopeoSucceeds(c, "", "sync", "--format=v2s1", "--all", "--src", "docker", "--dest", "dir", pullableTaggedImage, destDir3)
verifyManifestMIMEType(c, filepath.Join(destDir3, imageDir), manifest.DockerV2Schema1SignedMediaType)
}
func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
const localRegURL = "docker://" + v2DockerRegistryURL + "/"
@@ -501,8 +403,8 @@ func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableTaggedImage
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
image := "busybox:latest"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
imagePath := imageRef.DockerReference().String()
@@ -534,8 +436,8 @@ func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := pullableRepoWithLatestTag
// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
image := "busybox:latest"
dir1 := path.Join(tmpDir, "dir1")
err = os.Mkdir(dir1, 0755)
@@ -544,10 +446,6 @@ func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
err = os.Mkdir(dir2, 0755)
c.Assert(err, check.IsNil)
// create leading dirs
err = os.MkdirAll(path.Dir(path.Join(dir1, image)), 0755)
c.Assert(err, check.IsNil)
// copy docker => dir
assertSkopeoSucceeds(c, "", "copy", "docker://"+image, "dir:"+path.Join(dir1, image))
_, err = os.Stat(path.Join(dir1, image, "manifest.json"))
@@ -556,13 +454,9 @@ func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
// sync dir => docker
assertSkopeoSucceeds(c, "", "sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", dir1, v2DockerRegistryURL)
// create leading dirs
err = os.MkdirAll(path.Dir(path.Join(dir2, image)), 0755)
c.Assert(err, check.IsNil)
// copy docker => dir
assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", localRegURL+image, "dir:"+path.Join(dir2, image))
_, err = os.Stat(path.Join(dir2, image, "manifest.json"))
_, err = os.Stat(path.Join(path.Join(dir2, image), "manifest.json"))
c.Assert(err, check.IsNil)
out := combinedOutputOfCommand(c, "diff", "-urN", dir1, dir2)
@@ -625,7 +519,7 @@ func (s *SyncSuite) TestFailsWithDockerSourceUnauthorized(c *check.C) {
}
func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) {
repo := path.Join(v2DockerRegistryURL, "imagedoesnotexist")
repo := path.Join(v2DockerRegistryURL, "imagedoesdotexist")
tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
@@ -10,7 +10,6 @@ import (
"strings"
"time"
"github.com/containers/image/v5/manifest"
"github.com/go-check/check"
)
@@ -201,11 +200,3 @@ func runDecompressDirs(c *check.C, regexp string, args ...string) {
c.Assert(string(out), check.Matches, "(?s)"+regexp) // (?s) : '.' will also match newlines
}
}
// Verify manifest in a dir: image at dir is expectedMIMEType.
func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
manifestBlob, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
c.Assert(err, check.IsNil)
mimeType := manifest.GuessMIMEType(manifestBlob)
c.Assert(mimeType, check.Equals, expectedMIMEType)
}
@@ -1,67 +0,0 @@
let
pkgs = (import ./nixpkgs.nix {
crossSystem = {
config = "aarch64-unknown-linux-gnu";
};
config = {
packageOverrides = pkg: {
gpgme = (static pkg.gpgme);
libassuan = (static pkg.libassuan);
libgpgerror = (static pkg.libgpgerror);
libseccomp = (static pkg.libseccomp);
glib = (static pkg.glib).overrideAttrs (x: {
outputs = [ "bin" "out" "dev" ];
mesonFlags = [
"-Ddefault_library=static"
"-Ddevbindir=${placeholder ''dev''}/bin"
"-Dgtk_doc=false"
"-Dnls=disabled"
];
postInstall = ''
moveToOutput "share/glib-2.0" "$dev"
substituteInPlace "$dev/bin/gdbus-codegen" --replace "$out" "$dev"
sed -i "$dev/bin/glib-gettextize" -e "s|^gettext_dir=.*|gettext_dir=$dev/share/glib-2.0/gettext|"
sed '1i#line 1 "${x.pname}-${x.version}/include/glib-2.0/gobject/gobjectnotifyqueue.c"' \
-i "$dev"/include/glib-2.0/gobject/gobjectnotifyqueue.c
'';
});
};
};
});
static = pkg: pkg.overrideAttrs (x: {
doCheck = false;
configureFlags = (x.configureFlags or [ ]) ++ [
"--without-shared"
"--disable-shared"
];
dontDisableStatic = true;
enableSharedExecutables = false;
enableStatic = true;
});
self = with pkgs; buildGoModule rec {
name = "skopeo";
src = ./..;
vendorSha256 = null;
doCheck = false;
enableParallelBuilding = true;
outputs = [ "out" ];
nativeBuildInputs = [ bash gitMinimal go-md2man installShellFiles makeWrapper pkg-config which ];
buildInputs = [ glibc glibc.static gpgme libassuan libgpgerror libseccomp ];
prePatch = ''
export CFLAGS='-static -pthread'
export LDFLAGS='-s -w -static-libgcc -static'
export EXTRA_LDFLAGS='-s -w -linkmode external -extldflags "-static -lm"'
export BUILDTAGS='static netgo osusergo exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
'';
buildPhase = ''
patchShebangs .
make bin/skopeo
'';
installPhase = ''
install -Dm755 bin/skopeo $out/bin/skopeo
'';
};
in
self
@@ -1,65 +0,0 @@
{ system ? builtins.currentSystem }:
let
pkgs = (import ./nixpkgs.nix {
config = {
packageOverrides = pkg: {
gpgme = (static pkg.gpgme);
libassuan = (static pkg.libassuan);
libgpgerror = (static pkg.libgpgerror);
libseccomp = (static pkg.libseccomp);
glib = (static pkg.glib).overrideAttrs (x: {
outputs = [ "bin" "out" "dev" ];
mesonFlags = [
"-Ddefault_library=static"
"-Ddevbindir=${placeholder ''dev''}/bin"
"-Dgtk_doc=false"
"-Dnls=disabled"
];
postInstall = ''
moveToOutput "share/glib-2.0" "$dev"
substituteInPlace "$dev/bin/gdbus-codegen" --replace "$out" "$dev"
sed -i "$dev/bin/glib-gettextize" -e "s|^gettext_dir=.*|gettext_dir=$dev/share/glib-2.0/gettext|"
sed '1i#line 1 "${x.pname}-${x.version}/include/glib-2.0/gobject/gobjectnotifyqueue.c"' \
-i "$dev"/include/glib-2.0/gobject/gobjectnotifyqueue.c
'';
});
};
};
});
static = pkg: pkg.overrideAttrs (x: {
doCheck = false;
configureFlags = (x.configureFlags or [ ]) ++ [
"--without-shared"
"--disable-shared"
];
dontDisableStatic = true;
enableSharedExecutables = false;
enableStatic = true;
});
self = with pkgs; buildGoModule rec {
name = "skopeo";
src = ./..;
vendorSha256 = null;
doCheck = false;
enableParallelBuilding = true;
outputs = [ "out" ];
nativeBuildInputs = [ bash gitMinimal go-md2man installShellFiles makeWrapper pkg-config which ];
buildInputs = [ glibc glibc.static gpgme libassuan libgpgerror libseccomp ];
prePatch = ''
export CFLAGS='-static -pthread'
export LDFLAGS='-s -w -static-libgcc -static'
export EXTRA_LDFLAGS='-s -w -linkmode external -extldflags "-static -lm"'
export BUILDTAGS='static netgo osusergo exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
'';
buildPhase = ''
patchShebangs .
make bin/skopeo
'';
installPhase = ''
install -Dm755 bin/skopeo $out/bin/skopeo
'';
};
in
self
@@ -1,10 +0,0 @@
{
"url": "https://github.com/nixos/nixpkgs",
"rev": "eb7e1ef185f6c990cda5f71fdc4fb02e76ab06d5",
"date": "2021-05-05T23:16:00+02:00",
"path": "/nix/store/a98lkhjlsqh32ic2kkrv5kkik6jy25wh-nixpkgs",
"sha256": "1ibz204c41g7baqga2iaj11yz9l75cfdylkiqjnk5igm81ivivxg",
"fetchSubmodules": false,
"deepClone": false,
"leaveDotGit": false
}
@@ -1,8 +0,0 @@
let
json = builtins.fromJSON (builtins.readFile ./nixpkgs.json);
nixpkgs = import (builtins.fetchTarball {
name = "nixos-unstable";
url = "${json.url}/archive/${json.rev}.tar.gz";
inherit (json) sha256;
});
in nixpkgs
@@ -1,51 +0,0 @@
## This Makefile is used to publish skopeo container images with Travis CI ##
## Environment variables, used in this Makefile are specified in .travis.yml
export DOCKER_CLI_EXPERIMENTAL=enabled
GOARCH ?= $(shell go env GOARCH)
# Dereference variable $(1), return value if non-empty, otherwise raise an error.
err_if_empty = $(if $(strip $($(1))),$(strip $($(1))),$(error Required $(1) variable is undefined or empty))
# Requires two arguments: Names of the username and the password env. vars.
define quay_login
@echo "$(call err_if_empty,$(2))" | \
docker login quay.io -u "$(call err_if_empty,$(1))" --password-stdin
endef
# Build container image of skopeo upstream based on host architecture
build-image/upstream:
docker build -t "${UPSTREAM_IMAGE}-${GOARCH}" contrib/skopeoimage/upstream
# Build container image of skopeo stable based on host architecture
build-image/stable:
docker build -t "${STABLE_IMAGE}-${GOARCH}" contrib/skopeoimage/stable
# Push container image of skopeo upstream (based on host architecture) to image repository
push-image/upstream:
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
docker push "${UPSTREAM_IMAGE}-${GOARCH}"
# Push container image of skopeo stable (based on host architecture) to image default and extra repositories
push-image/stable:
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
docker push "${STABLE_IMAGE}-${GOARCH}"
docker tag "${STABLE_IMAGE}-${GOARCH}" "${EXTRA_STABLE_IMAGE}-${GOARCH}"
$(call quay_login,CONTAINERS_QUAY_USERNAME,CONTAINERS_QUAY_PASSWORD)
docker push "${EXTRA_STABLE_IMAGE}-${GOARCH}"
# Create and push multiarch image manifest of skopeo upstream
push-manifest-multiarch/upstream:
docker manifest create "${UPSTREAM_IMAGE}" $(foreach arch,${MULTIARCH_MANIFEST_ARCHITECTURES}, ${UPSTREAM_IMAGE}-${arch})
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
docker manifest push --purge "${UPSTREAM_IMAGE}"
# Create and push multiarch image manifest of skopeo stable
push-manifest-multiarch/stable:
docker manifest create "${STABLE_IMAGE}" $(foreach arch,${MULTIARCH_MANIFEST_ARCHITECTURES}, ${STABLE_IMAGE}-${arch})
$(call quay_login,SKOPEO_QUAY_USERNAME,SKOPEO_QUAY_PASSWORD)
docker manifest push --purge "${STABLE_IMAGE}"
# Push to extra repository
docker manifest create "${EXTRA_STABLE_IMAGE}" $(foreach arch,${MULTIARCH_MANIFEST_ARCHITECTURES}, ${EXTRA_STABLE_IMAGE}-${arch})
$(call quay_login,CONTAINERS_QUAY_USERNAME,CONTAINERS_QUAY_PASSWORD)
docker manifest push --purge "${EXTRA_STABLE_IMAGE}"

View File

@@ -1,40 +0,0 @@
# skopeo container image build with Travis
This document describes the details and requirements for building and publishing the skopeo container images. The images are published as several architecture-specific images, plus multi-arch manifest images on top, for both the upstream and stable versions.
The Travis configuration is available at `.travis.yml`.
The code to build and publish images is available at `release/Makefile` and should be used only via Travis.
Travis workflow has 3 major pieces:
- `local-build` - build and test the source code locally in osx and linux/amd64 environments; the 2 jobs run in parallel
- `image-build-push` - build and push container images, with several Travis jobs running in parallel to build images for several architectures (linux/amd64, linux/s390x, linux/ppc64le). The build part runs for each PR; the push part runs only for cron jobs or master branch updates.
- `manifest-multiarch-push` - create and push image manifests consisting of the architecture-specific images from the previous step. Executed only for cron jobs or master branch updates.
## Ways to trigger a full workflow run
- [cron job](https://docs.travis-ci.com/user/cron-jobs/#adding-cron-jobs)
- Trigger build from Travis CI
- Update code in master branch
## Environment variables
Several environment variables are used to customize image names and to hold the private credentials needed to push to the quay.io repositories.
**Image tags** are specified in environment variables and should be updated manually for each new release.
- `SKOPEO_QUAY_USERNAME` and `SKOPEO_QUAY_PASSWORD` are credentials to push images to `quay.io/skopeo/stable` and `quay.io/skopeo/upstream` repos, and require the credentials to have write permissions. These variables should be specified in [Travis](https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings).
- `CONTAINERS_QUAY_USERNAME` and `CONTAINERS_QUAY_PASSWORD` are credentials to push images to `quay.io/containers/skopeo` repos, and require the credentials to have write permissions. These variables should be specified in [Travis](https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings).
Variables in .travis.yml
- `MULTIARCH_MANIFEST_ARCHITECTURES` is a list of architecture short names to include in the final multiarch manifest. The values should match the architectures used in the `image-build-push` Travis step.
- `STABLE_IMAGE`, `EXTRA_STABLE_IMAGE` are image names to publish stable Skopeo.
- `UPSTREAM_IMAGE` is an image name to publish upstream Skopeo.
### Values for environment variables
| Env variable | Value |
| -------------------------------- |----------------------------------|
| MULTIARCH_MANIFEST_ARCHITECTURES | "amd64 s390x ppc64le" |
| STABLE_IMAGE | quay.io/skopeo/stable:v1.2.0 |
| EXTRA_STABLE_IMAGE | quay.io/containers/skopeo:v1.2.0 |
| UPSTREAM_IMAGE | quay.io/skopeo/upstream:master |

View File

@@ -65,47 +65,59 @@ END_EXPECT
}
@test "inspect: env" {
remote_image=docker://quay.io/libpod/fedora:31
remote_image=docker://docker.io/fedora:latest
run_skopeo inspect $remote_image
inspect_remote=$output
# Simple check on 'inspect' output with environment variables.
# 1) Get remote image values of environment variables (the value of 'Env')
# 2) Confirm substring in check_array and the value of 'Env' match.
check_array=(FGC=f31 DISTTAG=f31container)
check_array=(PATH=.* )
remote=$(jq '.Env[]' <<<"$inspect_remote")
for substr in ${check_array[@]}; do
expect_output --from="$remote" --substring "$substr"
done
}
# Tests https://github.com/containers/skopeo/pull/708
@test "inspect: image manifest list w/ diff platform" {
# This image's manifest is for an os + arch that is... um, unlikely
# to support skopeo in the foreseeable future. Or past. The image
# is created by the make-noarch-manifest script in this directory.
img=docker://quay.io/libpod/notmyarch:20210121
# When --raw is provided, can inspect show the raw manifest list, w/o
# requiring any particular platform to be present
# Test whether a container image can be inspected successfully without
# depending on the host platform.
# 1) Get current platform arch
# 2) Inspect a container image whose arch differs from the current platform arch
# 3) Compare output w/ expected result
# Get our host arch (what we're running on). This assumes that skopeo
# arch matches podman; it also assumes running podman >= April 2020
# (prior to that, the format keys were lower-case).
arch=$(podman info --format '{{.Host.Arch}}')
# Here we see a revolting workaround for a podman incompatibility
# change: in April 2020, podman info completely changed the format
# of the keys. What worked until then now throws an error. We
# need to work with both old and new podman.
arch=$(podman info --format '{{.host.arch}}' || true)
if [[ -z "$arch" ]]; then
arch=$(podman info --format '{{.Host.Arch}}')
fi
# By default, 'inspect' tries to match our host os+arch. This should fail.
run_skopeo 1 inspect $img
expect_output --substring "Error parsing manifest for image: Error choosing image instance: no image found in manifest list for architecture $arch, variant " \
"skopeo inspect, without --raw, fails"
case $arch in
"amd64")
diff_arch_list="s390x ppc64le"
;;
"s390x")
diff_arch_list="amd64 ppc64le"
;;
"ppc64le")
diff_arch_list="amd64 s390x"
;;
"*")
diff_arch_list="amd64 s390x ppc64le"
;;
esac
# With --raw, we can inspect
run_skopeo inspect --raw $img
expect_output --substring "manifests.*platform.*architecture" \
"skopeo inspect --raw returns reasonable output"
# ...and what we get should be consistent with what our script created.
archinfo=$(jq -r '.manifests[0].platform | {os,variant,architecture} | join("-")' <<<"$output")
expect_output --from="$archinfo" "amigaos-1000-mc68000" \
"os - variant - architecture of $img"
for arch in $diff_arch_list; do
remote_image=docker://docker.io/$arch/golang
run_skopeo inspect --tls-verify=false --raw $remote_image
remote_arch=$(jq -r '.manifests[0]["platform"]["architecture"]' <<< "$output")
expect_output --from="$remote_arch" "$arch" "platform arch of $remote_image"
done
}
# vim: filetype=sh

View File

@@ -14,7 +14,7 @@ function setup() {
# From remote, to dir1, to local, to dir2;
# compare dir1 and dir2, expect no changes
@test "copy: dir, round trip" {
local remote_image=docker://quay.io/libpod/busybox:latest
local remote_image=docker://docker.io/library/busybox:latest
local localimg=docker://localhost:5000/busybox:unsigned
local dir1=$TESTDIR/dir1
@@ -30,7 +30,7 @@ function setup() {
# Same as above, but using 'oci:' instead of 'dir:' and with a :latest tag
@test "copy: oci, round trip" {
local remote_image=docker://quay.io/libpod/busybox:latest
local remote_image=docker://docker.io/library/busybox:latest
local localimg=docker://localhost:5000/busybox:unsigned
local dir1=$TESTDIR/oci1
@@ -45,8 +45,8 @@ function setup() {
}
# Compression zstd
@test "copy: oci, zstd" {
local remote_image=docker://quay.io/libpod/busybox:latest
@test "copy: oci, round trip, zstd" {
local remote_image=docker://docker.io/library/busybox:latest
local dir=$TESTDIR/dir
@@ -57,17 +57,11 @@ function setup() {
# Check there is at least one file that has the zstd magic number as the first 4 bytes
(for i in $dir/blobs/sha256/*; do test "$(head -c 4 $i)" = $magic && exit 0; done; exit 1)
# Check that the manifest's description of the image's first layer is the zstd layer type
instance=$(jq -r '.manifests[0].digest' $dir/index.json)
[[ "$instance" != null ]]
mediatype=$(jq -r '.layers[0].mediaType' < $dir/blobs/${instance/://})
[[ "$mediatype" == "application/vnd.oci.image.layer.v1.tar+zstd" ]]
}
# Same image, extracted once with :tag and once without
@test "copy: oci w/ and w/o tags" {
local remote_image=docker://quay.io/libpod/busybox:latest
local remote_image=docker://docker.io/library/busybox:latest
local dir1=$TESTDIR/dir1
local dir2=$TESTDIR/dir2
@@ -84,7 +78,7 @@ function setup() {
# Registry -> storage -> oci-archive
@test "copy: registry -> storage -> oci-archive" {
local alpine=quay.io/libpod/alpine:latest
local alpine=docker.io/library/alpine:latest
local tmp=$TESTDIR/oci
run_skopeo copy docker://$alpine containers-storage:$alpine
@@ -100,50 +94,6 @@ function setup() {
docker://localhost:5000/foo
}
# manifest format
@test "copy: manifest format" {
local remote_image=docker://quay.io/libpod/busybox:latest
local dir1=$TESTDIR/dir1
local dir2=$TESTDIR/dir2
run_skopeo copy --format v2s2 $remote_image dir:$dir1
run_skopeo copy --format oci $remote_image dir:$dir2
grep 'application/vnd.docker.distribution.manifest.v2' $dir1/manifest.json
grep 'application/vnd.oci.image' $dir2/manifest.json
}
# additional tag
@test "copy: additional tag" {
local remote_image=docker://quay.io/libpod/busybox:latest
# additional-tag is supported only for docker-archive
run_skopeo copy --additional-tag busybox:mine $remote_image \
docker-archive:$TESTDIR/mybusybox.tar:busybox:latest
mkdir -p $TESTDIR/podmanroot
run podman --root $TESTDIR/podmanroot load -i $TESTDIR/mybusybox.tar
run podman --root $TESTDIR/podmanroot images
expect_output --substring "mine"
}
# shared blob directory
@test "copy: shared blob directory" {
local remote_image=docker://quay.io/libpod/busybox:latest
local shareddir=$TESTDIR/shareddir
local dir1=$TESTDIR/dir1
local dir2=$TESTDIR/dir2
run_skopeo copy --dest-shared-blob-dir $shareddir \
$remote_image oci:$dir1
[ -n "$(ls $shareddir)" ]
[ -z "$(ls $dir1/blobs)" ]
run_skopeo copy --src-shared-blob-dir $shareddir \
oci:$dir1 oci:$dir2
diff -urN $shareddir $dir2/blobs
}
teardown() {
podman rm -f reg

View File

@@ -8,28 +8,19 @@ load helpers
function setup() {
standard_setup
start_registry --with-cert --enable-delete=true reg
start_registry --with-cert reg
}
@test "local registry, with cert" {
# Push to local registry...
run_skopeo copy --dest-cert-dir=$TESTDIR/client-auth \
docker://quay.io/libpod/busybox:latest \
docker://docker.io/library/busybox:latest \
docker://localhost:5000/busybox:unsigned
# ...and pull it back out
run_skopeo copy --src-cert-dir=$TESTDIR/client-auth \
docker://localhost:5000/busybox:unsigned \
dir:$TESTDIR/extracted
# inspect with cert
run_skopeo inspect --cert-dir=$TESTDIR/client-auth \
docker://localhost:5000/busybox:unsigned
expect_output --substring "localhost:5000/busybox"
# delete with cert
run_skopeo delete --cert-dir=$TESTDIR/client-auth \
docker://localhost:5000/busybox:unsigned
}
teardown() {

View File

@@ -18,7 +18,7 @@ function setup() {
testuser=testuser
testpassword=$(random_string 15)
start_registry --testuser=$testuser --testpassword=$testpassword --enable-delete=true reg
start_registry --testuser=$testuser --testpassword=$testpassword reg
}
@test "auth: credentials on command line" {
@@ -43,7 +43,7 @@ function setup() {
# These should pass
run_skopeo copy --dest-tls-verify=false --dcreds=$testuser:$testpassword \
docker://quay.io/libpod/busybox:latest \
docker://docker.io/library/busybox:latest \
docker://localhost:5000/busybox:mine
run_skopeo inspect --tls-verify=false --creds=$testuser:$testpassword \
docker://localhost:5000/busybox:mine
@@ -55,7 +55,7 @@ function setup() {
podman login --tls-verify=false -u $testuser -p $testpassword localhost:5000
run_skopeo copy --dest-tls-verify=false \
docker://quay.io/libpod/busybox:latest \
docker://docker.io/library/busybox:latest \
docker://localhost:5000/busybox:mine
run_skopeo inspect --tls-verify=false docker://localhost:5000/busybox:mine
expect_output --substring "localhost:5000/busybox"
@@ -67,47 +67,6 @@ function setup() {
expect_output --substring "unauthorized: authentication required"
}
@test "auth: copy with --src-creds and --dest-creds" {
run_skopeo copy --dest-tls-verify=false --dest-creds=$testuser:$testpassword \
docker://quay.io/libpod/busybox:latest \
docker://localhost:5000/busybox:mine
run_skopeo copy --src-tls-verify=false --src-creds=$testuser:$testpassword \
docker://localhost:5000/busybox:mine \
dir:$TESTDIR/dir1
run ls $TESTDIR/dir1
expect_output --substring "manifest.json"
}
@test "auth: credentials via authfile" {
podman login --tls-verify=false --authfile $TESTDIR/test.auth -u $testuser -p $testpassword localhost:5000
# copy without authfile: should fail
run_skopeo 1 copy --dest-tls-verify=false \
docker://quay.io/libpod/busybox:latest \
docker://localhost:5000/busybox:mine
# copy with authfile: should work
run_skopeo copy --dest-tls-verify=false \
--authfile $TESTDIR/test.auth \
docker://quay.io/libpod/busybox:latest \
docker://localhost:5000/busybox:mine
# inspect without authfile: should fail
run_skopeo 1 inspect --tls-verify=false docker://localhost:5000/busybox:mine
expect_output --substring "unauthorized: authentication required"
# inspect with authfile: should work
run_skopeo inspect --tls-verify=false --authfile $TESTDIR/test.auth docker://localhost:5000/busybox:mine
expect_output --substring "localhost:5000/busybox"
# delete without authfile: should fail
run_skopeo 1 delete --tls-verify=false docker://localhost:5000/busybox:mine
expect_output --substring "authentication required"
# delete with authfile: should work
run_skopeo delete --tls-verify=false --authfile $TESTDIR/test.auth docker://localhost:5000/busybox:mine
}
teardown() {
podman rm -f reg

View File

@@ -92,7 +92,7 @@ END_POLICY_JSON
fi
# Cache local copy
run_skopeo copy docker://quay.io/libpod/busybox:latest \
run_skopeo copy docker://docker.io/library/busybox:latest \
dir:$TESTDIR/busybox
# Push a bunch of images. Do so *without* --policy flag; this lets us
@@ -143,75 +143,6 @@ END_PUSH
END_TESTS
}
@test "signing: remove signature" {
run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null
if [[ "$output" =~ 'signing is not supported' ]]; then
skip "skopeo built without support for creating signatures"
return 1
fi
if [ "$status" -ne 0 ]; then
die "exit code is $status; expected 0"
fi
# Cache local copy
run_skopeo copy docker://quay.io/libpod/busybox:latest \
dir:$TESTDIR/busybox
# Push a signed image
run_skopeo --registries.d $REGISTRIES_D \
copy --dest-tls-verify=false \
--sign-by=alice@test.redhat.com \
dir:$TESTDIR/busybox \
docker://localhost:5000/myns/alice:signed
# Fetch the image with signature
run_skopeo --registries.d $REGISTRIES_D \
--policy $POLICY_JSON \
copy --src-tls-verify=false \
docker://localhost:5000/myns/alice:signed \
dir:$TESTDIR/busybox-signed
# Fetch the image with removing signature
run_skopeo --registries.d $REGISTRIES_D \
--policy $POLICY_JSON \
copy --src-tls-verify=false \
--remove-signatures \
docker://localhost:5000/myns/alice:signed \
dir:$TESTDIR/busybox-unsigned
ls $TESTDIR/busybox-signed | grep "signature"
[ -z "$(ls $TESTDIR/busybox-unsigned | grep "signature")" ]
}
@test "signing: standalone" {
run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null
if [[ "$output" =~ 'signing is not supported' ]]; then
skip "skopeo built without support for creating signatures"
return 1
fi
if [ "$status" -ne 0 ]; then
die "exit code is $status; expected 0"
fi
run_skopeo copy --dest-tls-verify=false \
docker://quay.io/libpod/busybox:latest \
docker://localhost:5000/busybox:latest
run_skopeo copy --src-tls-verify=false \
docker://localhost:5000/busybox:latest \
dir:$TESTDIR/busybox
# Standalone sign
run_skopeo standalone-sign -o $TESTDIR/busybox.signature \
$TESTDIR/busybox/manifest.json \
localhost:5000/busybox:latest \
alice@test.redhat.com
# Standalone verify
fingerprint=$(gpg --list-keys | grep -B1 alice.test.redhat.com | head -n 1)
run_skopeo standalone-verify $TESTDIR/busybox/manifest.json \
localhost:5000/busybox:latest \
$fingerprint \
$TESTDIR/busybox.signature
# manifest digest
digest=$(echo "$output" | awk '{print $4;}')
run_skopeo manifest-digest $TESTDIR/busybox/manifest.json
expect_output $digest
}
teardown() {
podman rm -f reg

View File

@@ -13,7 +13,7 @@ function setup() {
# delete image from registry
@test "delete: remove image from registry" {
local remote_image=docker://quay.io/libpod/busybox:latest
local remote_image=docker://docker.io/library/busybox:latest
local localimg=docker://localhost:5000/busybox:unsigned
local output=

View File

@@ -6,7 +6,7 @@ SKOPEO_BINARY=${SKOPEO_BINARY:-$(dirname ${BASH_SOURCE})/../skopeo}
SKOPEO_TIMEOUT=${SKOPEO_TIMEOUT:-300}
# Default image to run as a local registry
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-quay.io/libpod/registry:2}
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-docker.io/library/registry:2}
###############################################################################
# BEGIN setup/teardown
@@ -194,7 +194,7 @@ function expect_output() {
fi
# This is a multi-line message, which may in turn contain multi-line
# output, so let's format it ourselves, readably
# output, so let's format it ourself, readably
local -a actual_split
readarray -t actual_split <<<"$actual"
printf "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n" >&2
@@ -331,8 +331,7 @@ start_registry() {
log_and_run openssl req -newkey rsa:4096 -nodes -sha256 \
-keyout $AUTHDIR/domain.key -x509 -days 2 \
-out $CERT \
-subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=registry host certificate" \
-addext subjectAltName=DNS:localhost
-subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
fi
reg_args+=(

View File

@@ -1,70 +0,0 @@
#!/bin/sh
#
# Tool for creating an image whose OS and arch will (probably) never
# match a system on which skopeo will run. This image will be used
# in the 'inspect' test.
#
set -ex
# Name and tag of the image we create
imgname=notmyarch
imgtag=$(date +%Y%m%d)
# (In case older image exists from a prior run)
buildah rmi $imgname:$imgtag &>/dev/null || true
#
# Step 1: create an image containing only a README and a copy of this script
#
id=$(buildah from scratch)
now=$(date --rfc-3339=seconds)
readme=$(mktemp -t README.XXXXXXXX)
ME=$(basename $0)
cat >| $readme <<EOF
This is a dummy image intended solely for skopeo testing.
This image was created $now
The script used to create this image is available as $ME
EOF
buildah copy $id $readme /README
buildah copy $id $0 /$ME
buildah commit $id my_tmp_image
buildah rm $id
#
# Step 2: create a manifest list, then add the above image but with
# an os+arch override.
#
buildah manifest create $imgname:$imgtag
buildah manifest add \
--os amigaos \
--arch mc68000 \
--variant 1000 \
$imgname:$imgtag my_tmp_image
# Done. Show instructions.
cat <<EOF
DONE!
You can inspect the created image with:
skopeo inspect --raw containers-storage:localhost/$imgname:$imgtag | jq .
(FIXME: is there a way to, like, mount the image and verify the files?)
If you're happy with this image, you can now:
buildah manifest push --all $imgname:$imgtag docker://quay.io/libpod/$imgname:$imgtag
Once done, you urgently need to:
buildah rmi $imgname:$imgtag my_tmp_image
If you don't do this, 'podman images' will barf catastrophically!
EOF

View File

@@ -1 +0,0 @@
* @microsoft/containerplat

View File

@@ -1,4 +1,4 @@
# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)
# go-winio
This repository contains utilities for efficiently performing Win32 IO operations in
Go. Currently, this is focused on accessing named pipes and other file handles, and

View File

@@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are

View File

@@ -0,0 +1,344 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tar implements access to tar archives.
// It aims to cover most of the variations, including those produced
// by GNU and BSD tars.
//
// References:
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
// http://www.gnu.org/software/tar/manual/html_node/Standard.html
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
package tar
import (
"bytes"
"errors"
"fmt"
"os"
"path"
"time"
)
const (
blockSize = 512
// Types
TypeReg = '0' // regular file
TypeRegA = '\x00' // regular file
TypeLink = '1' // hard link
TypeSymlink = '2' // symbolic link
TypeChar = '3' // character device node
TypeBlock = '4' // block device node
TypeDir = '5' // directory
TypeFifo = '6' // fifo node
TypeCont = '7' // reserved
TypeXHeader = 'x' // extended header
TypeXGlobalHeader = 'g' // global extended header
TypeGNULongName = 'L' // Next file has a long name
TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
TypeGNUSparse = 'S' // sparse file
)
// A Header represents a single header in a tar archive.
// Some fields may not be populated.
type Header struct {
Name string // name of header file entry
Mode int64 // permission and mode bits
Uid int // user id of owner
Gid int // group id of owner
Size int64 // length in bytes
ModTime time.Time // modified time
Typeflag byte // type of header entry
Linkname string // target name of link
Uname string // user name of owner
Gname string // group name of owner
Devmajor int64 // major number of character or block device
Devminor int64 // minor number of character or block device
AccessTime time.Time // access time
ChangeTime time.Time // status change time
CreationTime time.Time // creation time
Xattrs map[string]string
Winheaders map[string]string
}
// File name constants from the tar spec.
const (
fileNameSize = 100 // Maximum number of bytes in a standard tar name.
fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
)
// FileInfo returns an os.FileInfo for the Header.
func (h *Header) FileInfo() os.FileInfo {
return headerFileInfo{h}
}
// headerFileInfo implements os.FileInfo.
type headerFileInfo struct {
h *Header
}
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
func (fi headerFileInfo) Sys() interface{} { return fi.h }
// Name returns the base name of the file.
func (fi headerFileInfo) Name() string {
if fi.IsDir() {
return path.Base(path.Clean(fi.h.Name))
}
return path.Base(fi.h.Name)
}
// Mode returns the permission and mode bits for the headerFileInfo.
func (fi headerFileInfo) Mode() (mode os.FileMode) {
// Set file permission bits.
mode = os.FileMode(fi.h.Mode).Perm()
// Set setuid, setgid and sticky bits.
if fi.h.Mode&c_ISUID != 0 {
// setuid
mode |= os.ModeSetuid
}
if fi.h.Mode&c_ISGID != 0 {
// setgid
mode |= os.ModeSetgid
}
if fi.h.Mode&c_ISVTX != 0 {
// sticky
mode |= os.ModeSticky
}
// Set file mode bits.
// clear perm, setuid, setgid and sticky bits.
m := os.FileMode(fi.h.Mode) &^ 07777
if m == c_ISDIR {
// directory
mode |= os.ModeDir
}
if m == c_ISFIFO {
// named pipe (FIFO)
mode |= os.ModeNamedPipe
}
if m == c_ISLNK {
// symbolic link
mode |= os.ModeSymlink
}
if m == c_ISBLK {
// device file
mode |= os.ModeDevice
}
if m == c_ISCHR {
// Unix character device
mode |= os.ModeDevice
mode |= os.ModeCharDevice
}
if m == c_ISSOCK {
// Unix domain socket
mode |= os.ModeSocket
}
switch fi.h.Typeflag {
case TypeSymlink:
// symbolic link
mode |= os.ModeSymlink
case TypeChar:
// character device node
mode |= os.ModeDevice
mode |= os.ModeCharDevice
case TypeBlock:
// block device node
mode |= os.ModeDevice
case TypeDir:
// directory
mode |= os.ModeDir
case TypeFifo:
// fifo node
mode |= os.ModeNamedPipe
}
return mode
}
// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, h *Header) error
// Mode constants from the tar spec.
const (
c_ISUID = 04000 // Set uid
c_ISGID = 02000 // Set gid
c_ISVTX = 01000 // Save text (sticky bit)
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
c_ISLNK = 0120000 // Symbolic link
c_ISBLK = 060000 // Block special file
c_ISCHR = 020000 // Character special file
c_ISSOCK = 0140000 // Socket
)
// Keywords for the PAX Extended Header
const (
paxAtime = "atime"
paxCharset = "charset"
paxComment = "comment"
paxCtime = "ctime" // please note that ctime is not a valid pax header.
paxCreationTime = "LIBARCHIVE.creationtime"
paxGid = "gid"
paxGname = "gname"
paxLinkpath = "linkpath"
paxMtime = "mtime"
paxPath = "path"
paxSize = "size"
paxUid = "uid"
paxUname = "uname"
paxXattr = "SCHILY.xattr."
paxWindows = "MSWINDOWS."
paxNone = ""
)
// FileInfoHeader creates a partially-populated Header from fi.
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
// Because os.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
// of the returned header to provide the full path name of the file.
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
if fi == nil {
return nil, errors.New("tar: FileInfo is nil")
}
fm := fi.Mode()
h := &Header{
Name: fi.Name(),
ModTime: fi.ModTime(),
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
}
switch {
case fm.IsRegular():
h.Mode |= c_ISREG
h.Typeflag = TypeReg
h.Size = fi.Size()
case fi.IsDir():
h.Typeflag = TypeDir
h.Mode |= c_ISDIR
h.Name += "/"
case fm&os.ModeSymlink != 0:
h.Typeflag = TypeSymlink
h.Mode |= c_ISLNK
h.Linkname = link
case fm&os.ModeDevice != 0:
if fm&os.ModeCharDevice != 0 {
h.Mode |= c_ISCHR
h.Typeflag = TypeChar
} else {
h.Mode |= c_ISBLK
h.Typeflag = TypeBlock
}
case fm&os.ModeNamedPipe != 0:
h.Typeflag = TypeFifo
h.Mode |= c_ISFIFO
case fm&os.ModeSocket != 0:
h.Mode |= c_ISSOCK
default:
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
}
if fm&os.ModeSetuid != 0 {
h.Mode |= c_ISUID
}
if fm&os.ModeSetgid != 0 {
h.Mode |= c_ISGID
}
if fm&os.ModeSticky != 0 {
h.Mode |= c_ISVTX
}
// If possible, populate additional fields from OS-specific
// FileInfo fields.
if sys, ok := fi.Sys().(*Header); ok {
// This FileInfo came from a Header (not the OS). Use the
// original Header to populate all remaining fields.
h.Uid = sys.Uid
h.Gid = sys.Gid
h.Uname = sys.Uname
h.Gname = sys.Gname
h.AccessTime = sys.AccessTime
h.ChangeTime = sys.ChangeTime
if sys.Xattrs != nil {
h.Xattrs = make(map[string]string)
for k, v := range sys.Xattrs {
h.Xattrs[k] = v
}
}
if sys.Typeflag == TypeLink {
// hard link
h.Typeflag = TypeLink
h.Size = 0
h.Linkname = sys.Linkname
}
}
if sysStat != nil {
return h, sysStat(fi, h)
}
return h, nil
}
var zeroBlock = make([]byte, blockSize)
// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
// We compute and return both.
func checksum(header []byte) (unsigned int64, signed int64) {
for i := 0; i < len(header); i++ {
if i == 148 {
// The chksum field (header[148:156]) is special: it should be treated as space bytes.
unsigned += ' ' * 8
signed += ' ' * 8
i += 7
continue
}
unsigned += int64(header[i])
signed += int64(int8(header[i]))
}
return
}
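// Illustrative note (not part of the vendored file): for an all-zero 512-byte
// header only the chksum field contributes to the sum, because its eight bytes
// are treated as ASCII spaces; both the unsigned and the signed checksum are
// therefore 8 * 0x20 = 256.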
type slicer []byte
func (sp *slicer) next(n int) (b []byte) {
s := *sp
b, *sp = s[0:n], s[n:]
return
}
func isASCII(s string) bool {
for _, c := range s {
if c >= 0x80 {
return false
}
}
return true
}
func toASCII(s string) string {
if isASCII(s) {
return s
}
var buf bytes.Buffer
for _, c := range s {
if c < 0x80 {
buf.WriteByte(byte(c))
}
}
return buf.String()
}
// isHeaderOnlyType checks if the given type flag is of the type that has no
// data section even if a size is specified.
func isHeaderOnlyType(flag byte) bool {
switch flag {
case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
return true
default:
return false
}
}
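
For orientation only (not part of the diff), here is a minimal sketch of how a caller might build a Header via this fork's FileInfoHeader. The file name and the Winheaders entry are invented for illustration; the import path is the one referenced later in backuptar.go.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/Microsoft/go-winio/archive/tar"
)

func main() {
	// "example.txt" is a hypothetical file used only for this sketch.
	fi, err := os.Stat("example.txt")
	if err != nil {
		log.Fatal(err)
	}
	// FileInfoHeader fills Name, Mode, Size, ModTime and Typeflag from fi.
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		log.Fatal(err)
	}
	// Fork-specific extension: entries in Winheaders are later emitted by the
	// writer as "MSWINDOWS."-prefixed PAX records.
	hdr.Winheaders = map[string]string{"fileattr": "32"}
	fmt.Printf("%s: %d bytes, typeflag %q\n", hdr.Name, hdr.Size, hdr.Typeflag)
}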

File diff suppressed because it is too large

View File

@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux dragonfly openbsd solaris
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atim.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctim.Unix())
}

View File

@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd netbsd
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atimespec.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctimespec.Unix())
}

View File

@@ -0,0 +1,32 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
package tar
import (
"os"
"syscall"
)
func init() {
sysStat = statUnix
}
func statUnix(fi os.FileInfo, h *Header) error {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
h.Uid = int(sys.Uid)
h.Gid = int(sys.Gid)
// TODO(bradfitz): populate username & group. os/user
// doesn't cache LookupId lookups, and lacks group
// lookup functions.
h.AccessTime = statAtime(sys)
h.ChangeTime = statCtime(sys)
// TODO(bradfitz): major/minor device numbers?
return nil
}

View File

@@ -0,0 +1,444 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
// TODO(dsymonds):
// - catch more errors (no first header, etc.)
import (
"bytes"
"errors"
"fmt"
"io"
"path"
"sort"
"strconv"
"strings"
"time"
)
var (
ErrWriteTooLong = errors.New("archive/tar: write too long")
ErrFieldTooLong = errors.New("archive/tar: header field too long")
ErrWriteAfterClose = errors.New("archive/tar: write after close")
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
)
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
// A tar archive consists of a sequence of files.
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
// writing at most hdr.Size bytes in total.
type Writer struct {
w io.Writer
err error
nb int64 // number of unwritten bytes for current file entry
pad int64 // amount of padding to write after current file entry
closed bool
usedBinary bool // whether the binary numeric field extension was used
preferPax bool // use pax header instead of binary numeric header
hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
}
type formatter struct {
err error // Last error seen
}
// NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} }
// Flush finishes writing the current file (optional).
func (tw *Writer) Flush() error {
if tw.nb > 0 {
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
return tw.err
}
n := tw.nb + tw.pad
for n > 0 && tw.err == nil {
nr := n
if nr > blockSize {
nr = blockSize
}
var nw int
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
n -= int64(nw)
}
tw.nb = 0
tw.pad = 0
return tw.err
}
// Write s into b, terminating it with a NUL if there is room.
func (f *formatter) formatString(b []byte, s string) {
if len(s) > len(b) {
f.err = ErrFieldTooLong
return
}
ascii := toASCII(s)
copy(b, ascii)
if len(ascii) < len(b) {
b[len(ascii)] = 0
}
}
// Encode x as an octal ASCII string and write it into b with leading zeros.
func (f *formatter) formatOctal(b []byte, x int64) {
s := strconv.FormatInt(x, 8)
// leading zeros, but leave room for a NUL.
for len(s)+1 < len(b) {
s = "0" + s
}
f.formatString(b, s)
}
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
// encoding. Unlike octal encoding, base-256 encoding does not require that the
// string ends with a NUL character. Thus, all n bytes are available for output.
//
// If operating in binary mode, this assumes strict GNU binary mode; which means
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
// equivalent to the sign bit in two's complement form.
func fitsInBase256(n int, x int64) bool {
var binBits = uint(n-1) * 8
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
}
// Write x into b, as binary (GNUtar/star extension).
func (f *formatter) formatNumeric(b []byte, x int64) {
if fitsInBase256(len(b), x) {
for i := len(b) - 1; i >= 0; i-- {
b[i] = byte(x)
x >>= 8
}
b[0] |= 0x80 // Highest bit indicates binary format
return
}
f.formatOctal(b, 0) // Last resort, just write zero
f.err = ErrFieldTooLong
}
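// Illustrative note (not part of the vendored file): with an 8-byte field and
// x = 1<<24, fitsInBase256(8, x) is true and formatNumeric emits
// [0x80 0x00 0x00 0x00 0x01 0x00 0x00 0x00]: the big-endian bytes of x with
// the top bit of the first byte set to mark the binary (GNU/star) encoding.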
var (
minTime = time.Unix(0, 0)
// There is room for 11 octal digits (33 bits) of mtime.
maxTime = minTime.Add((1<<33 - 1) * time.Second)
)
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
func (tw *Writer) WriteHeader(hdr *Header) error {
return tw.writeHeader(hdr, true)
}
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
// writeHeader is also called internally by writePAXHeader, with allowPax set
// to false to suppress writing a nested pax header.
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
if tw.closed {
return ErrWriteAfterClose
}
if tw.err == nil {
tw.Flush()
}
if tw.err != nil {
return tw.err
}
// a map to hold pax header records, if any are needed
paxHeaders := make(map[string]string)
// TODO(shanemhansen): we might want to use PAX headers for
// subsecond time resolution, but for now let's just capture
// too long fields or non ascii characters
var f formatter
var header []byte
// We need to select which scratch buffer to use carefully,
// since this method is called recursively to write PAX headers.
// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
// already being used by the non-recursive call, so we must use paxHdrBuff.
header = tw.hdrBuff[:]
if !allowPax {
header = tw.paxHdrBuff[:]
}
copy(header, zeroBlock)
s := slicer(header)
// Wrappers around formatter that automatically sets paxHeaders if the
// argument extends beyond the capacity of the input byte slice.
var formatString = func(b []byte, s string, paxKeyword string) {
needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
if needsPaxHeader {
paxHeaders[paxKeyword] = s
return
}
f.formatString(b, s)
}
var formatNumeric = func(b []byte, x int64, paxKeyword string) {
// Try octal first.
s := strconv.FormatInt(x, 8)
if len(s) < len(b) {
f.formatOctal(b, x)
return
}
// If it is too long for octal, and PAX is preferred, use a PAX header.
if paxKeyword != paxNone && tw.preferPax {
f.formatOctal(b, 0)
s := strconv.FormatInt(x, 10)
paxHeaders[paxKeyword] = s
return
}
tw.usedBinary = true
f.formatNumeric(b, x)
}
var formatTime = func(b []byte, t time.Time, paxKeyword string) {
var unixTime int64
if !t.Before(minTime) && !t.After(maxTime) {
unixTime = t.Unix()
}
formatNumeric(b, unixTime, paxNone)
// Write a PAX header if the time didn't fit precisely.
if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) {
paxHeaders[paxKeyword] = formatPAXTime(t)
}
}
// keep a reference to the filename so we can overwrite it later if we detect that ustar longnames can be used instead of pax
pathHeaderBytes := s.next(fileNameSize)
formatString(pathHeaderBytes, hdr.Name, paxPath)
f.formatOctal(s.next(8), hdr.Mode) // 100:108
formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148
s.next(8) // chksum (148:156)
s.next(1)[0] = hdr.Typeflag // 156:157
formatString(s.next(100), hdr.Linkname, paxLinkpath)
copy(s.next(8), []byte("ustar\x0000")) // 257:265
formatString(s.next(32), hdr.Uname, paxUname) // 265:297
formatString(s.next(32), hdr.Gname, paxGname) // 297:329
formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
// keep a reference to the prefix so we can overwrite it later if we detect that ustar longnames can be used instead of pax
prefixHeaderBytes := s.next(155)
formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
if tw.usedBinary {
copy(header[257:265], []byte("ustar \x00"))
}
_, paxPathUsed := paxHeaders[paxPath]
// try to use a ustar header when only the name is too long
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
prefix, suffix, ok := splitUSTARPath(hdr.Name)
if ok {
// Since we can encode in USTAR format, disable PAX header.
delete(paxHeaders, paxPath)
// Update the path fields
formatString(pathHeaderBytes, suffix, paxNone)
formatString(prefixHeaderBytes, prefix, paxNone)
}
}
// The chksum field is terminated by a NUL and a space.
// This is different from the other octal fields.
chksum, _ := checksum(header)
f.formatOctal(header[148:155], chksum) // Never fails
header[155] = ' '
// Check if there were any formatting errors.
if f.err != nil {
tw.err = f.err
return tw.err
}
if allowPax {
if !hdr.AccessTime.IsZero() {
paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime)
}
if !hdr.ChangeTime.IsZero() {
paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime)
}
if !hdr.CreationTime.IsZero() {
paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime)
}
for k, v := range hdr.Xattrs {
paxHeaders[paxXattr+k] = v
}
for k, v := range hdr.Winheaders {
paxHeaders[paxWindows+k] = v
}
}
if len(paxHeaders) > 0 {
if !allowPax {
return errInvalidHeader
}
if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
return err
}
}
tw.nb = int64(hdr.Size)
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
_, tw.err = tw.w.Write(header)
return tw.err
}
func formatPAXTime(t time.Time) string {
sec := t.Unix()
usec := t.Nanosecond()
s := strconv.FormatInt(sec, 10)
if usec != 0 {
s = fmt.Sprintf("%s.%09d", s, usec)
}
return s
}
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
length := len(name)
if length <= fileNameSize || !isASCII(name) {
return "", "", false
} else if length > fileNamePrefixSize+1 {
length = fileNamePrefixSize + 1
} else if name[length-1] == '/' {
length--
}
i := strings.LastIndex(name[:length], "/")
nlen := len(name) - i - 1 // nlen is length of suffix
plen := i // plen is length of prefix
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
return "", "", false
}
return name[:i], name[i+1:], true
}
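// Illustrative note (not part of the vendored file): an ASCII name such as
// strings.Repeat("a", 95) + "/file.txt" is 104 bytes, too long for the
// 100-byte name field, but splitUSTARPath returns the 95-byte prefix of "a"s
// and the suffix "file.txt", so the entry can still be stored as plain USTAR
// without a PAX path record.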
// writePAXHeader writes an extended pax header to the
// archive.
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
// Prepare extended header
ext := new(Header)
ext.Typeflag = TypeXHeader
// Setting ModTime is required for reader parsing to
// succeed, and seems harmless enough.
ext.ModTime = hdr.ModTime
// The spec asks that we namespace our pseudo files
// with the current pid. However, this results in differing outputs
// for identical inputs. As such, the constant 0 is now used instead.
// golang.org/issue/12358
dir, file := path.Split(hdr.Name)
fullName := path.Join(dir, "PaxHeaders.0", file)
ascii := toASCII(fullName)
if len(ascii) > 100 {
ascii = ascii[:100]
}
ext.Name = ascii
// Construct the body
var buf bytes.Buffer
// Keys are sorted before writing to body to allow deterministic output.
var keys []string
for k := range paxHeaders {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
}
ext.Size = int64(len(buf.Bytes()))
if err := tw.writeHeader(ext, false); err != nil {
return err
}
if _, err := tw.Write(buf.Bytes()); err != nil {
return err
}
if err := tw.Flush(); err != nil {
return err
}
return nil
}
// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
func formatPAXRecord(k, v string) string {
const padding = 3 // Extra padding for ' ', '=', and '\n'
size := len(k) + len(v) + padding
size += len(strconv.Itoa(size))
record := fmt.Sprintf("%d %s=%s\n", size, k, v)
// Final adjustment if adding size field increased the record size.
if len(record) != size {
size = len(record)
record = fmt.Sprintf("%d %s=%s\n", size, k, v)
}
return record
}
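// Illustrative note (not part of the vendored file): formatPAXRecord("path", "a/b")
// computes size = 4 + 3 + 3 = 10 for key, value and padding, adds len("10") = 2
// for the length prefix itself, and returns "12 path=a/b\n", which is exactly
// 12 bytes, so no second adjustment pass is needed.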
// Write writes to the current entry in the tar archive.
// Write returns the error ErrWriteTooLong if more than
// hdr.Size bytes are written after WriteHeader.
func (tw *Writer) Write(b []byte) (n int, err error) {
if tw.closed {
err = ErrWriteAfterClose
return
}
overwrite := false
if int64(len(b)) > tw.nb {
b = b[0:tw.nb]
overwrite = true
}
n, err = tw.w.Write(b)
tw.nb -= int64(n)
if err == nil && overwrite {
err = ErrWriteTooLong
return
}
tw.err = err
return
}
// Close closes the tar archive, flushing any unwritten
// data to the underlying writer.
func (tw *Writer) Close() error {
if tw.err != nil || tw.closed {
return tw.err
}
tw.Flush()
tw.closed = true
if tw.err != nil {
return tw.err
}
// trailer: two zero blocks
for i := 0; i < 2; i++ {
_, tw.err = tw.w.Write(zeroBlock)
if tw.err != nil {
break
}
}
return tw.err
}
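
A hedged usage sketch of the Writer defined above (the entry name and payload are invented): WriteHeader starts an entry, Write accepts at most hdr.Size bytes, and Close pads the last entry to a 512-byte boundary and appends the two-zero-block trailer.

package main

import (
	"bytes"
	"log"
	"time"

	"github.com/Microsoft/go-winio/archive/tar"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf) // this constructor enables preferPax

	data := []byte("hello world")
	hdr := &tar.Header{
		Name:     "hello.txt", // hypothetical entry name
		Mode:     0644,
		Size:     int64(len(data)),
		ModTime:  time.Now(),
		Typeflag: tar.TypeReg,
	}
	if err := tw.WriteHeader(hdr); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(data); err != nil {
		log.Fatal(err)
	}
	// Close pads the 11 data bytes with 501 zero bytes (pad = (512 - 11%512) % 512)
	// and then writes two 512-byte zero blocks as the archive trailer.
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("archive size: %d bytes", buf.Len())
}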

View File

@@ -1,68 +0,0 @@
package backuptar
import (
"archive/tar"
"fmt"
"strconv"
"strings"
"time"
)
// Functions copied from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go
// as we need to manage the LIBARCHIVE.creationtime PAXRecord manually.
// Idea taken from containerd which did the same thing.
// parsePAXTime takes a string of the form %d.%d as described in the PAX
// specification. Note that this implementation allows for negative timestamps,
// which is allowed for by the PAX specification, but not always portable.
func parsePAXTime(s string) (time.Time, error) {
const maxNanoSecondDigits = 9
// Split string into seconds and sub-seconds parts.
ss, sn := s, ""
if pos := strings.IndexByte(s, '.'); pos >= 0 {
ss, sn = s[:pos], s[pos+1:]
}
// Parse the seconds.
secs, err := strconv.ParseInt(ss, 10, 64)
if err != nil {
return time.Time{}, tar.ErrHeader
}
if len(sn) == 0 {
return time.Unix(secs, 0), nil // No sub-second values
}
// Parse the nanoseconds.
if strings.Trim(sn, "0123456789") != "" {
return time.Time{}, tar.ErrHeader
}
if len(sn) < maxNanoSecondDigits {
sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
} else {
sn = sn[:maxNanoSecondDigits] // Right truncate
}
nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
if len(ss) > 0 && ss[0] == '-' {
return time.Unix(secs, -1*nsecs), nil // Negative correction
}
return time.Unix(secs, nsecs), nil
}
// formatPAXTime converts ts into a time of the form %d.%d as described in the
// PAX specification. This function is capable of negative timestamps.
func formatPAXTime(ts time.Time) (s string) {
secs, nsecs := ts.Unix(), ts.Nanosecond()
if nsecs == 0 {
return strconv.FormatInt(secs, 10)
}
// If seconds is negative, then perform correction.
sign := ""
if secs < 0 {
sign = "-" // Remember sign
secs = -(secs + 1) // Add a second to secs
nsecs = -(nsecs - 1e9) // Take that second away from nsecs
}
return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
}
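// Worked example (illustrative, not part of the vendored file): for
// ts = time.Unix(1, 500000000) formatPAXTime returns "1.5"; for
// ts = time.Unix(-1, 500000000), half a second before the epoch, the
// negative-correction branch yields "-0.5", and parsePAXTime("-0.5")
// reconstructs the same instant as time.Unix(0, -500000000).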

View File

@@ -3,7 +3,6 @@
package backuptar
import (
"archive/tar"
"encoding/base64"
"errors"
"fmt"
@@ -16,7 +15,7 @@ import (
"time"
"github.com/Microsoft/go-winio"
"golang.org/x/sys/windows"
"github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
)
const (
@@ -33,13 +32,11 @@ const (
)
const (
hdrFileAttributes = "MSWINDOWS.fileattr"
hdrSecurityDescriptor = "MSWINDOWS.sd"
hdrRawSecurityDescriptor = "MSWINDOWS.rawsd"
hdrMountPoint = "MSWINDOWS.mountpoint"
hdrEaPrefix = "MSWINDOWS.xattr."
hdrCreationTime = "LIBARCHIVE.creationtime"
hdrFileAttributes = "fileattr"
hdrSecurityDescriptor = "sd"
hdrRawSecurityDescriptor = "rawsd"
hdrMountPoint = "mountpoint"
hdrEaPrefix = "xattr."
)
func writeZeroes(w io.Writer, count int64) error {
@@ -89,17 +86,16 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
// BasicInfoHeader creates a tar header from basic file information.
func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
hdr := &tar.Header{
Format: tar.FormatPAX,
Name: filepath.ToSlash(name),
Size: size,
Typeflag: tar.TypeReg,
ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
PAXRecords: make(map[string]string),
Name: filepath.ToSlash(name),
Size: size,
Typeflag: tar.TypeReg,
ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()),
Winheaders: make(map[string]string),
}
hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds()))
hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
hdr.Mode |= c_ISDIR
@@ -159,7 +155,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
if err != nil {
return err
}
hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
case winio.BackupReparseData:
hdr.Mode |= c_ISLNK
@@ -170,7 +166,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
return err
}
if rp.IsMountPoint {
hdr.PAXRecords[hdrMountPoint] = "1"
hdr.Winheaders[hdrMountPoint] = "1"
}
hdr.Linkname = rp.Target
@@ -187,7 +183,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
// Use base64 encoding for the binary value. Note that there
// is no way to encode the EA's flags, since their use doesn't
// make any sense for persisted EAs.
hdr.PAXRecords[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
}
case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
@@ -258,7 +254,6 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
}
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
hdr = &tar.Header{
Format: hdr.Format,
Name: name + altName,
Mode: hdr.Mode,
Typeflag: tar.TypeReg,
@@ -298,13 +293,12 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
size = hdr.Size
}
fileInfo = &winio.FileBasicInfo{
LastAccessTime: windows.NsecToFiletime(hdr.AccessTime.UnixNano()),
LastWriteTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
ChangeTime: windows.NsecToFiletime(hdr.ChangeTime.UnixNano()),
// Default to ModTime, we'll pull hdrCreationTime below if present
CreationTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()),
}
if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok {
if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
attr, err := strconv.ParseUint(attrStr, 10, 32)
if err != nil {
return "", 0, nil, err
@@ -315,13 +309,6 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
}
}
if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok {
creationTime, err := parsePAXTime(creationTimeStr)
if err != nil {
return "", 0, nil, err
}
fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano())
}
return
}
@@ -334,13 +321,13 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
var err error
// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
// by this library will have raw binary for the security descriptor.
if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
sd, err = winio.SddlToSecurityDescriptor(sddl)
if err != nil {
return nil, err
}
}
if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok {
sd, err = base64.StdEncoding.DecodeString(sdraw)
if err != nil {
return nil, err
@@ -361,7 +348,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
}
}
var eas []winio.ExtendedAttribute
for k, v := range hdr.PAXRecords {
for k, v := range hdr.Winheaders {
if !strings.HasPrefix(k, hdrEaPrefix) {
continue
}
@@ -393,7 +380,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
}
}
if hdr.Typeflag == tar.TypeSymlink {
_, isMountPoint := hdr.PAXRecords[hdrMountPoint]
_, isMountPoint := hdr.Winheaders[hdrMountPoint]
rp := winio.ReparsePoint{
Target: filepath.FromSlash(hdr.Linkname),
IsMountPoint: isMountPoint,

View File

@@ -5,14 +5,21 @@ package winio
import (
"os"
"runtime"
"syscall"
"unsafe"
)
"golang.org/x/sys/windows"
//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
const (
fileBasicInfo = 0
fileIDInfo = 0x12
)
// FileBasicInfo contains file access time and file attributes information.
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
FileAttributes uint32
pad uint32 // padding
}
@@ -20,7 +27,7 @@ type FileBasicInfo struct {
// GetFileBasicInfo retrieves times and attributes for a file.
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
bi := &FileBasicInfo{}
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@@ -29,32 +36,13 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
// SetFileBasicInfo sets times and attributes for a file.
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
return nil
}
// FileStandardInfo contains extended information for the file.
// FILE_STANDARD_INFO in WinBase.h
// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
type FileStandardInfo struct {
AllocationSize, EndOfFile int64
NumberOfLinks uint32
DeletePending, Directory bool
}
// GetFileStandardInfo retrieves extended information for the file.
func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
si := &FileStandardInfo{}
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
return si, nil
}
// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
// unique on a system.
type FileIDInfo struct {
@@ -65,7 +53,7 @@ type FileIDInfo struct {
// GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) {
fileID := &FileIDInfo{}
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)

View File

@@ -3,7 +3,7 @@ module github.com/Microsoft/go-winio
go 1.12
require (
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.7.0
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
github.com/pkg/errors v0.8.1
github.com/sirupsen/logrus v1.4.1
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
)

View File

@@ -1,14 +1,18 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View File

@@ -1,5 +1,3 @@
// +build windows
package winio
import (

View File

@@ -182,14 +182,13 @@ func (s pipeAddress) String() string {
}
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
for {
select {
case <-ctx.Done():
return syscall.Handle(0), ctx.Err()
default:
h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err == nil {
return h, nil
}
@@ -198,7 +197,7 @@ func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Hand
}
// Wait 10 msec and try again. This is a rather simplistic
// approach, as we always retry every 10 milliseconds.
time.Sleep(10 * time.Millisecond)
time.Sleep(time.Millisecond * 10)
}
}
}
@@ -211,7 +210,7 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
if timeout != nil {
absTimeout = time.Now().Add(*timeout)
} else {
absTimeout = time.Now().Add(2 * time.Second)
absTimeout = time.Now().Add(time.Second * 2)
}
ctx, _ := context.WithDeadline(context.Background(), absTimeout)
conn, err := DialPipeContext(ctx, path)
@@ -224,15 +223,9 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
// cancellation or timeout.
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE)
}
// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx`
// cancellation or timeout.
func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
var err error
var h syscall.Handle
h, err = tryDialPipe(ctx, &path, access)
h, err = tryDialPipe(ctx, &path)
if err != nil {
return nil, err
}
@@ -429,10 +422,10 @@ type PipeConfig struct {
// when the pipe is in message mode.
MessageMode bool
// InputBufferSize specifies the size of the input buffer, in bytes.
// InputBufferSize specifies the size the input buffer, in bytes.
InputBufferSize int32
// OutputBufferSize specifies the size of the output buffer, in bytes.
// OutputBufferSize specifies the size the input buffer, in bytes.
OutputBufferSize int32
}
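For context, and not part of the diff: a minimal client sketch against the DialPipe/DialPipeContext API shown above; the pipe name is hypothetical and assumes a server is already listening on it.

//go:build windows

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Dial an existing named pipe; while the pipe is busy the dialer retries
	// every 10 ms until the context expires.
	conn, err := winio.DialPipeContext(ctx, `\\.\pipe\example`) // hypothetical pipe name
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if _, err := fmt.Fprintln(conn, "ping"); err != nil {
		log.Fatal(err)
	}

	buf := make([]byte, 512)
	n, err := conn.Read(buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("got %q\n", buf[:n])
}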

View File

@@ -1,5 +1,3 @@
// +build windows
// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,

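As a side note, and not part of the diff: a small sketch of the guid package described by the comment above, assuming its NewV4 and FromString constructors (they are not part of the hunk shown); the literal GUID string is hypothetical.

//go:build windows

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// Random (version 4) GUID.
	g, err := guid.NewV4()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(g) // canonical textual form

	// Parse the canonical textual form back into a GUID value.
	parsed, err := guid.FromString("01234567-89ab-cdef-0123-456789abcdef") // hypothetical value
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed)
}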
View File

@@ -1,161 +0,0 @@
// +build windows
package security
import (
"os"
"syscall"
"unsafe"
"github.com/pkg/errors"
)
type (
accessMask uint32
accessMode uint32
desiredAccess uint32
inheritMode uint32
objectType uint32
shareMode uint32
securityInformation uint32
trusteeForm uint32
trusteeType uint32
explicitAccess struct {
accessPermissions accessMask
accessMode accessMode
inheritance inheritMode
trustee trustee
}
trustee struct {
multipleTrustee *trustee
multipleTrusteeOperation int32
trusteeForm trusteeForm
trusteeType trusteeType
name uintptr
}
)
const (
accessMaskDesiredPermission accessMask = 1 << 31 // GENERIC_READ
accessModeGrant accessMode = 1
desiredAccessReadControl desiredAccess = 0x20000
desiredAccessWriteDac desiredAccess = 0x40000
gvmga = "GrantVmGroupAccess:"
inheritModeNoInheritance inheritMode = 0x0
inheritModeSubContainersAndObjectsInherit inheritMode = 0x3
objectTypeFileObject objectType = 0x1
securityInformationDACL securityInformation = 0x4
shareModeRead shareMode = 0x1
shareModeWrite shareMode = 0x2
sidVmGroup = "S-1-5-83-0"
trusteeFormIsSid trusteeForm = 0
trusteeTypeWellKnownGroup trusteeType = 5
)
// GrantVMGroupAccess sets the DACL for a specified file or directory to
// include Grant ACE entries for the VM Group SID. This is a golang re-
// implementation of the same function in vmcompute, just not exported in
// RS5. Which kind of sucks. Sucks a lot :/
func GrantVmGroupAccess(name string) error {
// Stat (to determine if `name` is a directory).
s, err := os.Stat(name)
if err != nil {
return errors.Wrapf(err, "%s os.Stat %s", gvmga, name)
}
// Get a handle to the file/directory. Must defer Close on success.
fd, err := createFile(name, s.IsDir())
if err != nil {
return err // Already wrapped
}
defer syscall.CloseHandle(fd)
// Get the current DACL and Security Descriptor. Must defer LocalFree on success.
ot := objectTypeFileObject
si := securityInformationDACL
sd := uintptr(0)
origDACL := uintptr(0)
if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name)
}
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))
// Generate a new DACL which is the current DACL with the required ACEs added.
// Must defer LocalFree on success.
newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), origDACL)
if err != nil {
return err // Already wrapped
}
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL)))
// And finally use SetSecurityInfo to apply the updated DACL.
if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name)
}
return nil
}
// createFile is a helper function to call [Nt]CreateFile to get a handle to
// the file or directory.
func createFile(name string, isDir bool) (syscall.Handle, error) {
namep := syscall.StringToUTF16(name)
da := uint32(desiredAccessReadControl | desiredAccessWriteDac)
sm := uint32(shareModeRead | shareModeWrite)
fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL)
if isDir {
fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS)
}
fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
if err != nil {
return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name)
}
return fd, nil
}
// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added.
// The caller is responsible for LocalFree of the returned DACL on success.
func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) {
// Generate pointers to the SIDs based on the string SIDs
sid, err := syscall.StringToSid(sidVmGroup)
if err != nil {
return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup)
}
inheritance := inheritModeNoInheritance
if isDir {
inheritance = inheritModeSubContainersAndObjectsInherit
}
eaArray := []explicitAccess{
explicitAccess{
accessPermissions: accessMaskDesiredPermission,
accessMode: accessModeGrant,
inheritance: inheritance,
trustee: trustee{
trusteeForm: trusteeFormIsSid,
trusteeType: trusteeTypeWellKnownGroup,
name: uintptr(unsafe.Pointer(sid)),
},
},
}
modifiedDACL := uintptr(0)
if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil {
return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name)
}
return modifiedDACL, nil
}
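For context, and not part of the diff: a minimal sketch of how GrantVmGroupAccess is typically called, assuming the package is imported from github.com/Microsoft/go-winio/pkg/security; the directory path is hypothetical.

//go:build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio/pkg/security"
)

func main() {
	// Grant the VM group SID (S-1-5-83-0) access to a directory, e.g. a
	// container layer that a utility VM needs to read.
	if err := security.GrantVmGroupAccess(`C:\layers\base`); err != nil { // hypothetical path
		log.Fatal(err)
	}
}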

View File

@@ -1,7 +0,0 @@
package security
//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go
//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) [failretval!=0] = advapi32.GetSecurityInfo
//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) [failretval!=0] = advapi32.SetSecurityInfo
//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) [failretval!=0] = advapi32.SetEntriesInAclW

View File

@@ -1,70 +0,0 @@
// Code generated by 'go generate'; DO NOT EDIT.
package security
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo")
procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW")
procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo")
)
func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) {
r1, _, e1 := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0)
if r1 != 0 {
err = errnoErr(e1)
}
return
}
func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) {
r1, _, e1 := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0)
if r1 != 0 {
err = errnoErr(e1)
}
return
}
func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) {
r1, _, e1 := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0)
if r1 != 0 {
err = errnoErr(e1)
}
return
}

View File

@@ -28,9 +28,8 @@ const (
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
SeBackupPrivilege = "SeBackupPrivilege"
SeRestorePrivilege = "SeRestorePrivilege"
SeSecurityPrivilege = "SeSecurityPrivilege"
SeBackupPrivilege = "SeBackupPrivilege"
SeRestorePrivilege = "SeRestorePrivilege"
)
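For context, and not part of the diff: these privilege names are normally consumed through the package's privilege helpers; a minimal sketch, assuming winio.RunWithPrivilege exists with this shape (it is not part of the hunk shown).

//go:build windows

package main

import (
	"log"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// Enable SeBackupPrivilege for the current thread, run the callback,
	// then drop the privilege again.
	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
		// Code that needs the privilege runs here while it is enabled.
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}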
const (

View File

@@ -1,3 +1,3 @@
package winio
//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go

View File

@@ -2,322 +2,150 @@
package vhd
import (
"fmt"
"syscall"
import "syscall"
"github.com/Microsoft/go-winio/pkg/guid"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
)
//go:generate go run mksyscall_windows.go -output zvhd.go vhd.go
//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go
//sys createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.CreateVirtualDisk
//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.OpenVirtualDisk
//sys detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = VirtDisk.DetachVirtualDisk
//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.CreateVirtualDisk
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk
//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk
//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = virtdisk.DetachVirtualDisk
//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) [failretval != 0] = virtdisk.GetVirtualDiskPhysicalPath
type (
CreateVirtualDiskFlag uint32
VirtualDiskFlag uint32
AttachVirtualDiskFlag uint32
DetachVirtualDiskFlag uint32
VirtualDiskAccessMask uint32
)
type VirtualStorageType struct {
type virtualStorageType struct {
DeviceID uint32
VendorID guid.GUID
VendorID [16]byte
}
type CreateVersion2 struct {
UniqueID guid.GUID
type (
createVirtualDiskFlag uint32
VirtualDiskAccessMask uint32
VirtualDiskFlag uint32
)
const (
// Flags for creating a VHD (not exported)
createVirtualDiskFlagNone createVirtualDiskFlag = 0
createVirtualDiskFlagFullPhysicalAllocation createVirtualDiskFlag = 1
createVirtualDiskFlagPreventWritesToSourceDisk createVirtualDiskFlag = 2
createVirtualDiskFlagDoNotCopyMetadataFromParent createVirtualDiskFlag = 4
// Access Mask for opening a VHD
VirtualDiskAccessNone VirtualDiskAccessMask = 0
VirtualDiskAccessAttachRO VirtualDiskAccessMask = 65536
VirtualDiskAccessAttachRW VirtualDiskAccessMask = 131072
VirtualDiskAccessDetach VirtualDiskAccessMask = 262144
VirtualDiskAccessGetInfo VirtualDiskAccessMask = 524288
VirtualDiskAccessCreate VirtualDiskAccessMask = 1048576
VirtualDiskAccessMetaOps VirtualDiskAccessMask = 2097152
VirtualDiskAccessRead VirtualDiskAccessMask = 851968
VirtualDiskAccessAll VirtualDiskAccessMask = 4128768
VirtualDiskAccessWritable VirtualDiskAccessMask = 3276800
// Flags for opening a VHD
OpenVirtualDiskFlagNone VirtualDiskFlag = 0
OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x1
OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x2
OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x4
OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x8
OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x10
OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x20
OpenVirtualDiskFlagVhdSetFileOnly VirtualDiskFlag = 0x40
OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x80
OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x100
)
type createVersion2 struct {
UniqueID [16]byte // GUID
MaximumSize uint64
BlockSizeInBytes uint32
SectorSizeInBytes uint32
PhysicalSectorSizeInByte uint32
ParentPath *uint16 // string
SourcePath *uint16 // string
OpenFlags uint32
ParentVirtualStorageType VirtualStorageType
SourceVirtualStorageType VirtualStorageType
ResiliencyGUID guid.GUID
ParentVirtualStorageType virtualStorageType
SourceVirtualStorageType virtualStorageType
ResiliencyGUID [16]byte // GUID
}
type CreateVirtualDiskParameters struct {
type createVirtualDiskParameters struct {
Version uint32 // Must always be set to 2
Version2 CreateVersion2
Version2 createVersion2
}
type OpenVersion2 struct {
GetInfoOnly bool
ReadOnly bool
ResiliencyGUID guid.GUID
type openVersion2 struct {
GetInfoOnly int32 // bool but 4-byte aligned
ReadOnly int32 // bool but 4-byte aligned
ResiliencyGUID [16]byte // GUID
}
type OpenVirtualDiskParameters struct {
type openVirtualDiskParameters struct {
Version uint32 // Must always be set to 2
Version2 OpenVersion2
Version2 openVersion2
}
type AttachVersion2 struct {
RestrictedOffset uint64
RestrictedLength uint64
}
type AttachVirtualDiskParameters struct {
Version uint32 // Must always be set to 2
Version2 AttachVersion2
}
const (
VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3
// Access Mask for opening a VHD
VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000
VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000
VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000
VirtualDiskAccessDetach VirtualDiskAccessMask = 0x00040000
VirtualDiskAccessGetInfo VirtualDiskAccessMask = 0x00080000
VirtualDiskAccessCreate VirtualDiskAccessMask = 0x00100000
VirtualDiskAccessMetaOps VirtualDiskAccessMask = 0x00200000
VirtualDiskAccessRead VirtualDiskAccessMask = 0x000d0000
VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000
VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000
// Flags for creating a VHD
CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0
CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1
CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2
CreateVirtualDiskFlagDoNotCopyMetadataFromParent CreateVirtualDiskFlag = 0x4
CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8
CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10
CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20
CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40
CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80
CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100
CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200
// Flags for opening a VHD
OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000
OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001
OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002
OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x00000004
OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x00000008
OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x00000010
OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x00000020
OpenVirtualDiskFlagVhdsetFileOnly VirtualDiskFlag = 0x00000040
OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x00000080
OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100
OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200
// Flags for attaching a VHD
AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000
AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001
AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002
AttachVirtualDiskFlagPermanentLifetime AttachVirtualDiskFlag = 0x00000004
AttachVirtualDiskFlagNoLocalHost AttachVirtualDiskFlag = 0x00000008
AttachVirtualDiskFlagNoSecurityDescriptor AttachVirtualDiskFlag = 0x00000010
AttachVirtualDiskFlagBypassDefaultEncryptionPolicy AttachVirtualDiskFlag = 0x00000020
AttachVirtualDiskFlagNonPnp AttachVirtualDiskFlag = 0x00000040
AttachVirtualDiskFlagRestrictedRange AttachVirtualDiskFlag = 0x00000080
AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100
AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200
// Flags for detaching a VHD
DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0
)
// CreateVhdx is a helper function to create a simple vhdx file at the given path using
// default values.
// CreateVhdx will create a simple vhdx file at the given path using default values.
func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
params := CreateVirtualDiskParameters{
var (
defaultType virtualStorageType
handle syscall.Handle
)
parameters := createVirtualDiskParameters{
Version: 2,
Version2: CreateVersion2{
Version2: createVersion2{
MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024,
BlockSizeInBytes: blockSizeInMb * 1024 * 1024,
},
}
handle, err := CreateVirtualDisk(path, VirtualDiskAccessNone, CreateVirtualDiskFlagNone, &params)
if err != nil {
if err := createVirtualDisk(
&defaultType,
path,
uint32(VirtualDiskAccessNone),
nil,
uint32(createVirtualDiskFlagNone),
0,
&parameters,
nil,
&handle); err != nil {
return err
}
if err := syscall.CloseHandle(handle); err != nil {
return err
}
return nil
}
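For orientation, and not part of the diff: CreateVhdx is the simplest entry point and keeps the same signature on both sides of this change; a minimal sketch with a hypothetical path.

//go:build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio/vhd"
)

func main() {
	// Create a 10 GiB VHDX with a 1 MiB block size, using the defaults above.
	if err := vhd.CreateVhdx(`C:\disks\scratch.vhdx`, 10, 1); err != nil { // hypothetical path
		log.Fatal(err)
	}
}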
// DetachVirtualDisk detaches a virtual hard disk by handle.
func DetachVirtualDisk(handle syscall.Handle) (err error) {
if err := detachVirtualDisk(handle, 0, 0); err != nil {
return errors.Wrap(err, "failed to detach virtual disk")
}
return nil
}
// DetachVhd detaches a vhd found at `path`.
// DetachVhd detaches a mounted container layer vhd found at `path`.
func DetachVhd(path string) error {
handle, err := OpenVirtualDisk(
path,
VirtualDiskAccessNone,
OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator,
)
OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator)
if err != nil {
return err
}
defer syscall.CloseHandle(handle)
return DetachVirtualDisk(handle)
}
// AttachVirtualDisk attaches a virtual hard disk for use.
func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) {
// Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5.
if err := attachVirtualDisk(
handle,
nil,
uint32(attachVirtualDiskFlag),
0,
parameters,
nil,
); err != nil {
return errors.Wrap(err, "failed to attach virtual disk")
}
return nil
}
// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2
// of the ATTACH_VIRTUAL_DISK_PARAMETERS.
func AttachVhd(path string) (err error) {
handle, err := OpenVirtualDisk(
path,
VirtualDiskAccessNone,
OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator,
)
if err != nil {
return err
}
defer syscall.CloseHandle(handle)
params := AttachVirtualDiskParameters{Version: 2}
if err := AttachVirtualDisk(
handle,
AttachVirtualDiskFlagNone,
&params,
); err != nil {
return errors.Wrap(err, "failed to attach virtual disk")
}
return nil
return detachVirtualDisk(handle, 0, 0)
}
// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags.
func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) {
parameters := OpenVirtualDiskParameters{Version: 2}
handle, err := OpenVirtualDiskWithParameters(
vhdPath,
virtualDiskAccessMask,
openVirtualDiskFlags,
&parameters,
func OpenVirtualDisk(path string, accessMask VirtualDiskAccessMask, flag VirtualDiskFlag) (syscall.Handle, error) {
var (
defaultType virtualStorageType
handle syscall.Handle
)
if err != nil {
parameters := openVirtualDiskParameters{Version: 2}
if err := openVirtualDisk(
&defaultType,
path,
uint32(accessMask),
uint32(flag),
&parameters,
&handle); err != nil {
return 0, err
}
return handle, nil
}
// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters.
func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) {
var (
handle syscall.Handle
defaultType VirtualStorageType
)
if parameters.Version != 2 {
return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version)
}
if err := openVirtualDisk(
&defaultType,
vhdPath,
uint32(virtualDiskAccessMask),
uint32(openVirtualDiskFlags),
parameters,
&handle,
); err != nil {
return 0, errors.Wrap(err, "failed to open virtual disk")
}
return handle, nil
}
// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk.
func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) {
var (
handle syscall.Handle
defaultType VirtualStorageType
)
if parameters.Version != 2 {
return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version)
}
if err := createVirtualDisk(
&defaultType,
path,
uint32(virtualDiskAccessMask),
nil,
uint32(createVirtualDiskFlags),
0,
parameters,
nil,
&handle,
); err != nil {
return handle, errors.Wrap(err, "failed to create virtual disk")
}
return handle, nil
}
// GetVirtualDiskPhysicalPath takes a handle to a virtual hard disk and returns the physical
// path of the disk on the machine. This path is in the form \\.\PhysicalDriveX where X is an integer
// that represents the particular enumeration of the physical disk on the caller's system.
func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) {
var (
diskPathSizeInBytes uint32 = 256 * 2 // max path length 256 wide chars
diskPhysicalPathBuf [256]uint16
)
if err := getVirtualDiskPhysicalPath(
handle,
&diskPathSizeInBytes,
&diskPhysicalPathBuf[0],
); err != nil {
return "", errors.Wrap(err, "failed to get disk physical path")
}
return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil
}
// CreateDiffVhd is a helper function to create a differencing virtual disk.
func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error {
// Setting `ParentPath` is how to signal to create a differencing disk.
createParams := &CreateVirtualDiskParameters{
Version: 2,
Version2: CreateVersion2{
ParentPath: windows.StringToUTF16Ptr(baseVhdPath),
BlockSizeInBytes: blockSizeInMB * 1024 * 1024,
OpenFlags: uint32(OpenVirtualDiskFlagCachedIO),
},
}
vhdHandle, err := CreateVirtualDisk(
diffVhdPath,
VirtualDiskAccessNone,
CreateVirtualDiskFlagNone,
createParams,
)
if err != nil {
return fmt.Errorf("failed to create differencing vhd: %s", err)
}
if err := syscall.CloseHandle(vhdHandle); err != nil {
return fmt.Errorf("failed to close differencing vhd handle: %s", err)
}
return nil
}
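For context, and not part of the diff: the side of this file that declares AttachVirtualDisk, GetVirtualDiskPhysicalPath and DetachVirtualDisk composes roughly as follows; a sketch only, with a hypothetical VHD path.

//go:build windows

package main

import (
	"fmt"
	"log"
	"syscall"

	"github.com/Microsoft/go-winio/vhd"
)

func main() {
	// Open an existing VHD/VHDX without resolving relative parent locators.
	handle, err := vhd.OpenVirtualDisk(
		`C:\disks\scratch.vhdx`, // hypothetical path
		vhd.VirtualDiskAccessNone,
		vhd.OpenVirtualDiskFlagCachedIO|vhd.OpenVirtualDiskFlagIgnoreRelativeParentLocator,
	)
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.CloseHandle(handle)

	// Attach with version-2 parameters, then resolve the physical drive path.
	params := vhd.AttachVirtualDiskParameters{Version: 2}
	if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, &params); err != nil {
		log.Fatal(err)
	}
	defer vhd.DetachVirtualDisk(handle)

	physPath, err := vhd.GetVirtualDiskPhysicalPath(handle)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(physPath) // e.g. \\.\PhysicalDrive3
}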

vendor/github.com/Microsoft/go-winio/vhd/zvhd.go (new generated vendored file, 99 lines)
View File

@@ -0,0 +1,99 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package vhd
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modVirtDisk = windows.NewLazySystemDLL("VirtDisk.dll")
procCreateVirtualDisk = modVirtDisk.NewProc("CreateVirtualDisk")
procOpenVirtualDisk = modVirtDisk.NewProc("OpenVirtualDisk")
procDetachVirtualDisk = modVirtDisk.NewProc("DetachVirtualDisk")
)
func createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(path)
if err != nil {
return
}
return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, flags, providerSpecificFlags, parameters, o, handle)
}
func _createVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(flags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(handle)))
if r1 != 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(path)
if err != nil {
return
}
return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle)
}
func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) {
r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
if r1 != 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) {
r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(flags), uintptr(providerSpecificFlags))
if r1 != 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}

View File

@@ -1,106 +0,0 @@
// Code generated by 'go generate'; DO NOT EDIT.
package vhd
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll")
procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk")
procCreateVirtualDisk = modvirtdisk.NewProc("CreateVirtualDisk")
procDetachVirtualDisk = modvirtdisk.NewProc("DetachVirtualDisk")
procGetVirtualDiskPhysicalPath = modvirtdisk.NewProc("GetVirtualDiskPhysicalPath")
procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk")
)
func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)))
if r1 != 0 {
err = errnoErr(e1)
}
return
}
func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(path)
if err != nil {
return
}
return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle)
}
func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) {
r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle)))
if r1 != 0 {
err = errnoErr(e1)
}
return
}
func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) {
r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags))
if r1 != 0 {
err = errnoErr(e1)
}
return
}
func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) {
r1, _, e1 := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer)))
if r1 != 0 {
err = errnoErr(e1)
}
return
}
func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(path)
if err != nil {
return
}
return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle)
}
func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) {
r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
if r1 != 0 {
err = errnoErr(e1)
}
return
}

View File

@@ -19,7 +19,6 @@ const (
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
@@ -27,7 +26,7 @@ var (
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
@@ -38,62 +37,243 @@ func errnoErr(e syscall.Errno) error {
}
var (
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modntdll = windows.NewLazySystemDLL("ntdll.dll")
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
modntdll = windows.NewLazySystemDLL("ntdll.dll")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
procCreateFileW = modkernel32.NewProc("CreateFileW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
procLocalFree = modkernel32.NewProc("LocalFree")
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
procBackupRead = modkernel32.NewProc("BackupRead")
procBackupWrite = modkernel32.NewProc("BackupWrite")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
procCreateFileW = modkernel32.NewProc("CreateFileW")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procLocalFree = modkernel32.NewProc("LocalFree")
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
procbind = modws2_32.NewProc("bind")
)
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
var _p0 uint32
if releaseAll {
_p0 = 1
}
r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
success = r0 != 0
if true {
err = errnoErr(e1)
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
newport = syscall.Handle(r0)
if newport == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
var _p0 uint32
if wait {
_p0 = 1
} else {
_p0 = 0
}
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
}
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
ptr = uintptr(r0)
return
}
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
status = ntstatus(r0)
return
}
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
if r0 != 0 {
winerr = syscall.Errno(r0)
}
return
}
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
status = ntstatus(r0)
return
}
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
status = ntstatus(r0)
return
}
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(accountName)
if err != nil {
return
}
return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
}
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
@@ -101,7 +281,11 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint
func convertSidToStringSid(sid *byte, str **uint16) (err error) {
r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
@@ -118,73 +302,126 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func localFree(mem uintptr) {
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
return
}
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
len = uint32(r0)
return
}
func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
var _p0 uint32
if releaseAll {
_p0 = 1
} else {
_p0 = 0
}
r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
success = r0 != 0
if true {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func impersonateSelf(level uint32) (err error) {
r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(accountName)
if err != nil {
return
}
return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
}
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
func revertToSelf() (err error) {
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
var _p0 uint32
if openAsSelf {
_p0 = 1
} else {
_p0 = 0
}
return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
}
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
return _lookupPrivilegeName(_p0, luid, buffer, size)
}
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
func getCurrentThread() (h syscall.Handle) {
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
h = syscall.Handle(r0)
return
}
@@ -205,27 +442,53 @@ func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err err
func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
var _p0 uint32
if openAsSelf {
_p0 = 1
func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
return _lookupPrivilegeName(_p0, luid, buffer, size)
}
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func revertToSelf() (err error) {
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
}
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
@@ -238,14 +501,22 @@ func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, proce
var _p1 uint32
if abort {
_p1 = 1
} else {
_p1 = 0
}
var _p2 uint32
if processSecurity {
_p2 = 1
} else {
_p2 = 0
}
r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
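As an aside, and not part of the diff: callers normally reach backupRead/backupWrite through the package's BackupFileReader/BackupFileWriter wrappers rather than directly; a sketch, assuming winio.NewBackupFileReader exists with this shape.

//go:build windows

package main

import (
	"io"
	"log"
	"os"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	f, err := os.Open(`C:\data\file.txt`) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Stream the file's backup data, including its security descriptor,
	// through the BackupRead-based reader.
	r := winio.NewBackupFileReader(f, true)
	defer r.Close()

	n, err := io.Copy(io.Discard, r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d backup-stream bytes", n)
}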
@@ -258,162 +529,22 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
var _p1 uint32
if abort {
_p1 = 1
} else {
_p1 = 0
}
var _p2 uint32
if processSecurity {
_p2 = 1
} else {
_p2 = 0
}
r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
err = errnoErr(e1)
}
return
}
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
newport = syscall.Handle(r0)
if newport == 0 {
err = errnoErr(e1)
}
return
}
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
}
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
err = errnoErr(e1)
}
return
}
func getCurrentThread() (h syscall.Handle) {
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
h = syscall.Handle(r0)
return
}
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
ptr = uintptr(r0)
return
}
func localFree(mem uintptr) {
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
return
}
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
status = ntstatus(r0)
return
}
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
status = ntstatus(r0)
return
}
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
status = ntstatus(r0)
return
}
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
if r0 != 0 {
winerr = syscall.Errno(r0)
}
return
}
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
var _p0 uint32
if wait {
_p0 = 1
}
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
if r1 == 0 {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
@@ -421,7 +552,11 @@ func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socketError {
err = errnoErr(e1)
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
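
A change that recurs across these hunks is the error path taken when a call fails: a zero e1 is treated specially and reported as syscall.EINVAL, since a zero errno would otherwise turn into a misleading "success" error value. Below is a self-contained sketch of that mapping, loosely in the spirit of the generated errnoErr helper but written for illustration rather than copied from the vendored code:

package main

import (
	"fmt"
	"syscall"
)

// errnoErr maps a raw errno to an error value. A zero errno after a
// failed call is reported as EINVAL so callers never see an error
// whose text claims the operation succeeded.
func errnoErr(e syscall.Errno) error {
	if e == 0 {
		return syscall.EINVAL
	}
	return e
}

func main() {
	fmt.Println(errnoErr(0))                // the platform's EINVAL message
	fmt.Println(errnoErr(syscall.Errno(2))) // the platform's message for errno 2
}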


@@ -1 +0,0 @@
* text=auto eol=lf


@@ -1,3 +1 @@
*.exe
.idea
.vscode

17 vendor/github.com/Microsoft/hcsshim/.gometalinter.json generated vendored Normal file

@@ -0,0 +1,17 @@
{
"Vendor": true,
"Deadline": "2m",
"Sort": [
"linter",
"severity",
"path",
"line"
],
"Skip": [
"internal\\schema2"
],
"EnableGC": true,
"Enable": [
"gofmt"
]
}


@@ -1 +1,3 @@
* @microsoft/containerplat
* @microsoft/containerplat
/hcn/* @nagiesek


@@ -25,7 +25,6 @@ plugins = ["grpc", "fieldpath"]
"gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
"google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/empty.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/struct.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
"google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
@@ -36,14 +35,20 @@ plugins = ["grpc", "fieldpath"]
prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"]
plugins = ["ttrpc"]
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"]
plugins = ["ttrpc"]
# Lock down runhcs config
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/ncproxyttrpc"]
plugins = ["ttrpc"]
[[descriptors]]
prefix = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
target = "cmd/containerd-shim-runhcs-v1/options/next.pb.txt"
ignore_files = [
"google/protobuf/descriptor.proto",
"gogoproto/gogo.proto"
]
[[overrides]]
prefixes = ["github.com/Microsoft/hcsshim/internal/vmservice"]
plugins = ["ttrpc"]
[[descriptors]]
prefix = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats"
target = "cmd/containerd-shim-runhcs-v1/stats/next.pb.txt"
ignore_files = [
"google/protobuf/descriptor.proto",
"gogoproto/gogo.proto"
]

Some files were not shown because too many files have changed in this diff.