Compare commits

..

3 Commits

Author SHA1 Message Date
Miloslav Trmač
89966c513c Release 1.9.3
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2022-10-19 19:11:14 +02:00
Miloslav Trmač
2382f751cb Update to c/image v5.22.1
> go get github.com/containers/image/v5@v5.22.1
> make vendor

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2022-10-19 19:11:14 +02:00
Miloslav Trmač
f24433e290 Pin Go to 1.18
1.19 has changed the expected gofmt format, and we don't want
to follow such changes on the stable branch.

go@1.18 is "keg-only", i.e. not installed by Brew to /usr/local/bin,
so we need to change PATH to point at it (as the installation instructs
us to).

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2022-10-19 19:11:14 +02:00
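A minimal sketch of the keg-only setup the "Pin Go to 1.18" commit describes, assuming Homebrew's default /usr/local prefix on Intel macOS (the prefix differs on Apple Silicon):

```sh
# go@1.18 is keg-only: Homebrew does not symlink it into /usr/local/bin,
# so its bin directory has to be put on PATH explicitly.
brew install go@1.18
export PATH="/usr/local/opt/go@1.18/bin:$PATH"
go version   # expected to report go1.18.x
```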
2233 changed files with 67843 additions and 240152 deletions

View File

@@ -23,10 +23,10 @@ env:
####
#### Cache-image names to test with (double-quotes around names are critical)
####
FEDORA_NAME: "fedora-37"
FEDORA_NAME: "fedora-36"
# Google-cloud VM Images
IMAGE_SUFFIX: "c6300530360713216"
IMAGE_SUFFIX: "c5495735033528320"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
# Container FQIN's
@@ -75,25 +75,22 @@ doccheck_task:
"${SKOPEO_PATH}/${SCRIPT_BASE}/runner.sh" doccheck
osx_task:
# Don't run for docs-only or multi-arch image builds.
# Also don't run on release-branches or their PRs,
# since base container-image is not version-constrained.
only_if: &not_docs_or_release_branch >-
($CIRRUS_BASE_BRANCH == $CIRRUS_DEFAULT_BRANCH ||
$CIRRUS_BRANCH == $CIRRUS_DEFAULT_BRANCH ) &&
# Run for regular PRs and those with [CI:BUILD] but not [CI:DOCS]
only_if: &not_docs_multiarch >-
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_CRON != 'multiarch'
depends_on:
- validate
macos_instance:
image: ghcr.io/cirruslabs/macos-ventura-base:latest
image: catalina-xcode
setup_script: |
export PATH=$GOPATH/bin:$PATH
# /usr/local/opt/go@1.18 will be populated by (brew install go@1.18) below
export PATH=$GOPATH/bin:/usr/local/opt/go@1.18/bin:$PATH
brew update
brew install gpgme go go-md2man
brew install gpgme go@1.18 go-md2man
go install golang.org/x/lint/golint@latest
test_script: |
export PATH=$GOPATH/bin:$PATH
export PATH=$GOPATH/bin:/usr/local/opt/go@1.18/bin:$PATH
go version
go env
make validate-local test-unit-local bin/skopeo
@@ -103,9 +100,7 @@ osx_task:
cross_task:
alias: cross
only_if: >-
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
$CIRRUS_CRON != 'multiarch'
only_if: *not_docs_multiarch
depends_on:
- validate
gce_instance: &standardvm
@@ -125,42 +120,6 @@ cross_task:
"${GOSRC}/${SCRIPT_BASE}/runner.sh" cross
ostree-rs-ext_task:
alias: proxy_ostree_ext
only_if: *not_docs_or_release_branch
# WARNING: This task potentially performs a container image
# build (on change) with runtime package installs. Therefore,
# its behavior can be unpredictable and potentially flake-prone.
# In case of emergency, uncomment the next statement to bypass.
#
# skip: $CI == "true"
#
depends_on:
- validate
# Ref: https://cirrus-ci.org/guide/docker-builder-vm/#dockerfile-as-a-ci-environment
container:
# The runtime image will be rebuilt on change
dockerfile: contrib/cirrus/ostree_ext.dockerfile
docker_arguments: # required build-args
BASE_FQIN: quay.io/coreos-assembler/fcos-buildroot:testing-devel
CIRRUS_IMAGE_VERSION: 1
env:
EXT_REPO_NAME: ostree-rs-ext
EXT_REPO_HOME: $CIRRUS_WORKING_DIR/../$EXT_REPO_NAME
EXT_REPO: https://github.com/ostreedev/${EXT_REPO_NAME}.git
skopeo_build_script:
- dnf builddep -y skopeo
- make
- make install
proxy_ostree_ext_build_script:
- git clone --depth 1 $EXT_REPO $EXT_REPO_HOME
- cd $EXT_REPO_HOME
- cargo test --no-run
proxy_ostree_ext_test_script:
- cd $EXT_REPO_HOME
- cargo test -- --nocapture --quiet
#####
##### NOTE: This task is substantially duplicated in the containers/image
##### repository's `.cirrus.yml`. Changes made here should be fully merged
@@ -284,7 +243,6 @@ success_task:
- doccheck
- osx
- cross
- proxy_ostree_ext
- test_skopeo
- image_build
- meta

View File

@@ -1,74 +0,0 @@
/*
Renovate is a service similar to GitHub Dependabot, but with
(fantastically) more configuration options. So many options
in fact, if you're new I recommend glossing over this cheat-sheet
prior to the official documentation:
https://www.augmentedmind.de/2021/07/25/renovate-bot-cheat-sheet
Configuration Update/Change Procedure:
1. Make changes
2. Manually validate changes (from repo-root):
podman run -it \
-v ./.github/renovate.json5:/usr/src/app/renovate.json5:z \
docker.io/renovate/renovate:latest \
renovate-config-validator
3. Commit.
Configuration Reference:
https://docs.renovatebot.com/configuration-options/
Monitoring Dashboard:
https://app.renovatebot.com/dashboard#github/containers
Note: The Renovate bot will create/manage its business on
branches named 'renovate/*'. Otherwise, and by
default, the only copy of this file that matters
is the one on the `main` branch. No other branches
will be monitored or touched in any way.
*/
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
/*************************************************
****** Global/general configuration options *****
*************************************************/
// Re-use predefined sets of configuration options to DRY
"extends": [
// https://github.com/containers/automation/blob/main/renovate/defaults.json5
"github>containers/automation//renovate/defaults.json5"
],
// Permit automatic rebasing when base-branch changes by more than
// one commit.
"rebaseWhen": "behind-base-branch",
/*************************************************
*** Repository-specific configuration options ***
*************************************************/
// Don't leave dep. update. PRs "hanging", assign them to people.
"assignees": ["containers/image-maintainers"], // same for skopeo
/*************************************************
***** Golang-specific configuration options *****
*************************************************/
"golang": {
// N/B: LAST MATCHING RULE WINS
// https://docs.renovatebot.com/configuration-options/#packagerules
"packageRules": [
// Package version retraction (https://go.dev/ref/mod#go-mod-file-retract)
// is broken in Renovate
// ref: https://github.com/renovatebot/renovate/issues/13012
{
"matchPackageNames": ["github.com/containers/common"],
// Both v1.0.0 and v1.0.1 should be ignored.
"allowedVersions": "!/v((1.0.0)|(1.0.1))$/"
},
],
},
}

View File

@@ -3,18 +3,103 @@
# See also:
# https://github.com/containers/podman/blob/main/.github/workflows/check_cirrus_cron.yml
# Format Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions
# Required to un-FUBAR default ${{github.workflow}} value
name: check_cirrus_cron
on:
# Note: This only applies to the default branch.
schedule:
# N/B: This should correspond to a period slightly after
# the last job finishes running. See job defs. at:
# https://cirrus-ci.com/settings/repository/6706677464432640
- cron: '03 03 * * 1-5'
# Debug: Allow triggering job manually in github-actions WebUI
workflow_dispatch: {}
# Note: This only applies to the default branch.
schedule:
# N/B: This should correspond to a period slightly after
# the last job finishes running. See job defs. at:
# https://cirrus-ci.com/settings/repository/6706677464432640
- cron: '59 23 * * 1-5'
# Debug: Allow triggering job manually in github-actions WebUI
workflow_dispatch: {}
permissions:
contents: read
env:
# Debug-mode can reveal secrets, only enable by a secret value.
# Ref: https://help.github.com/en/actions/configuring-and-managing-workflows/managing-a-workflow-run#enabling-step-debug-logging
ACTIONS_STEP_DEBUG: '${{ secrets.ACTIONS_STEP_DEBUG }}'
# CSV listing of e-mail addresses for delivery failure or error notices
RCPTCSV: rh.container.bot@gmail.com,podman-monitor@lists.podman.io
# Filename for table of cron-name to build-id data
# (must be in $GITHUB_WORKSPACE/artifacts/)
NAME_ID_FILEPATH: './artifacts/name_id.txt'
jobs:
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
call_cron_failures:
uses: containers/podman/.github/workflows/check_cirrus_cron.yml@main
secrets: inherit
cron_failures:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
with:
persist-credentials: false
# Avoid duplicating cron_failures.sh in skopeo repo.
- uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
with:
repository: containers/podman
path: '_podman'
persist-credentials: false
- name: Get failed cron names and Build IDs
id: cron
run: './_podman/.github/actions/${{ github.workflow }}/${{ github.job }}.sh'
- if: steps.cron.outputs.failures > 0
shell: bash
# Must be inline, since context expressions are used.
# Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions
run: |
set -eo pipefail
(
echo "Detected one or more Cirrus-CI cron-triggered jobs have failed recently:"
echo ""
while read -r NAME BID; do
echo "Cron build '$NAME' Failed: https://cirrus-ci.com/build/$BID"
done < "$NAME_ID_FILEPATH"
echo ""
echo "# Source: ${{ github.workflow }} workflow on ${{ github.repository }}."
# Separate content from sendgrid.com automatic footer.
echo ""
echo ""
) > ./artifacts/email_body.txt
- if: steps.cron.outputs.failures > 0
name: Send failure notification e-mail
# Ref: https://github.com/dawidd6/action-send-mail
uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
with:
server_address: ${{secrets.ACTION_MAIL_SERVER}}
server_port: 465
username: ${{secrets.ACTION_MAIL_USERNAME}}
password: ${{secrets.ACTION_MAIL_PASSWORD}}
subject: Cirrus-CI cron build failures on ${{github.repository}}
to: ${{env.RCPTCSV}}
from: ${{secrets.ACTION_MAIL_SENDER}}
body: file://./artifacts/email_body.txt
- if: always()
uses: actions/upload-artifact@82c141cc518b40d92cc801eee768e7aafc9c2fa2 # v2
with:
name: ${{ github.job }}_artifacts
path: artifacts/*
- if: failure()
name: Send error notification e-mail
uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
with:
server_address: ${{secrets.ACTION_MAIL_SERVER}}
server_port: 465
username: ${{secrets.ACTION_MAIL_USERNAME}}
password: ${{secrets.ACTION_MAIL_PASSWORD}}
subject: Github workflow error on ${{github.repository}}
to: ${{env.RCPTCSV}}
from: ${{secrets.ACTION_MAIL_SENDER}}
body: "Job failed: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}"

View File

@@ -1,19 +0,0 @@
---
# See also: https://github.com/containers/podman/blob/main/.github/workflows/rerun_cirrus_cron.yml
on:
# Note: This only applies to the default branch.
schedule:
# N/B: This should correspond to a period slightly after
# the last job finishes running. See job defs. at:
# https://cirrus-ci.com/settings/repository/6706677464432640
- cron: '01 01 * * 1-5'
# Debug: Allow triggering job manually in github-actions WebUI
workflow_dispatch: {}
jobs:
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
call_cron_rerun:
uses: containers/podman/.github/workflows/rerun_cirrus_cron.yml@main
secrets: inherit

View File

@@ -17,7 +17,7 @@ jobs:
pull-requests: write # for actions/stale to close stale PRs
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v7
- uses: actions/stale@98ed4cb500039dbcccf4bd9bedada4d0187f2757 # v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'A friendly reminder that this issue had no activity for 30 days.'

View File

@@ -28,15 +28,10 @@ ifeq ($(GOBIN),)
GOBIN := $(GOPATH)/bin
endif
# Scripts may also use CONTAINER_RUNTIME, so we need to export it.
# Note possibly non-obvious aspects of this:
# - We need to use 'command -v' here, not 'which', for compatibility with MacOS.
# - GNU Make 4.2.1 (included in Ubuntu 20.04) incorrectly tries to avoid invoking
# a shell, and fails because there is no /usr/bin/command. The trailing ';' in
# $(shell … ;) defeats that heuristic (recommended in
# https://savannah.gnu.org/bugs/index.php?57625 ).
export CONTAINER_RUNTIME ?= $(if $(shell command -v podman ;),podman,docker)
GOMD2MAN ?= $(if $(shell command -v go-md2man ;),go-md2man,$(GOBIN)/go-md2man)
# Multiple scripts are sensitive to this value, make sure it's exported/available
# N/B: Need to use 'command -v' here for compatibility with MacOS.
export CONTAINER_RUNTIME ?= $(if $(shell command -v podman),podman,docker)
GOMD2MAN ?= $(if $(shell command -v go-md2man),go-md2man,$(GOBIN)/go-md2man)
# Go module support: set `-mod=vendor` to use the vendored sources.
# See also hack/make.sh.
@@ -55,6 +50,8 @@ ifeq ($(GOOS), linux)
endif
endif
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
# If $TESTFLAGS is set, it is passed as extra arguments to 'go test'.
# You can increase test output verbosity with the option '-test.vv'.
# You can select certain tests to run, with `-test.run <regex>` for example:
@@ -90,7 +87,7 @@ endif
CONTAINER_GOSRC = /src/github.com/containers/skopeo
CONTAINER_RUN ?= $(CONTAINER_CMD) --security-opt label=disable -v $(CURDIR):$(CONTAINER_GOSRC) -w $(CONTAINER_GOSRC) $(SKOPEO_CIDEV_CONTAINER_FQIN)
GIT_COMMIT := $(shell GIT_CEILING_DIRECTORIES=$$(cd ..; pwd) git rev-parse HEAD 2> /dev/null || true)
GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
EXTRA_LDFLAGS ?=
SKOPEO_LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'
@@ -197,11 +194,7 @@ shell:
check: validate test-unit test-integration test-system
test-integration:
# This is intended to be equal to $(CONTAINER_RUN), but with --cap-add=cap_mknod.
# --cap-add=cap_mknod is important to allow skopeo to use containers-storage: directly as it exists in the callers environment, without
# creating a nested user namespace (which requires /etc/subuid and /etc/subgid to be set up)
$(CONTAINER_CMD) --security-opt label=disable --cap-add=cap_mknod -v $(CURDIR):$(CONTAINER_GOSRC) -w $(CONTAINER_GOSRC) $(SKOPEO_CIDEV_CONTAINER_FQIN) \
$(MAKE) test-integration-local
$(CONTAINER_RUN) $(MAKE) test-integration-local
# Intended for CI, assumed to be running in quay.io/libpod/skopeo_cidev container.
@@ -209,6 +202,7 @@ test-integration-local: bin/skopeo
hack/make.sh test-integration
# complicated set of options needed to run podman-in-podman
# TODO: The $(RM) command will likely fail w/o `podman unshare`
test-system:
DTEMP=$(shell mktemp -d --tmpdir=/var/tmp podman-tmp.XXXXXX); \
$(CONTAINER_CMD) --privileged \
@@ -217,7 +211,7 @@ test-system:
"$(SKOPEO_CIDEV_CONTAINER_FQIN)" \
$(MAKE) test-system-local; \
rc=$$?; \
$(CONTAINER_RUNTIME) unshare rm -rf $$DTEMP; # This probably doesn't work with Docker, oh well, better than nothing... \
-$(RM) -rf $$DTEMP; \
exit $$rc
# Intended for CI, assumed to already be running in quay.io/libpod/skopeo_cidev container.
@@ -248,12 +242,12 @@ test-unit-local: bin/skopeo
$(GO) test $(MOD_VENDOR) -tags "$(BUILDTAGS)" $$($(GO) list $(MOD_VENDOR) -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')
vendor:
$(GO) mod tidy -compat=1.17
$(GO) mod tidy
$(GO) mod vendor
$(GO) mod verify
vendor-in-container:
podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src golang $(MAKE) vendor
podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src quay.io/libpod/golang:1.16 $(MAKE) vendor
# CAUTION: This is not a replacement for RPMs provided by your distro.
# Only intended to build and test the latest unreleased changes.

View File

@@ -1,8 +1,7 @@
<!--- skopeo [![Build Status](https://travis-ci.org/containers/skopeo.svg?branch=main)](https://travis-ci.org/containers/skopeo)
skopeo [![Build Status](https://travis-ci.org/containers/skopeo.svg?branch=master)](https://travis-ci.org/containers/skopeo)
=
--->
<img src="https://cdn.rawgit.com/containers/skopeo/main/docs/skopeo.svg" width="250">
<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">
----
@@ -57,37 +56,29 @@ Examples:
$ skopeo inspect docker://registry.fedoraproject.org/fedora:latest
{
"Name": "registry.fedoraproject.org/fedora",
"Digest": "sha256:0f65bee641e821f8118acafb44c2f8fe30c2fc6b9a2b3729c0660376391aa117",
"Digest": "sha256:655721ff613ee766a4126cb5e0d5ae81598e1b0c3bcf7017c36c4d72cb092fe9",
"RepoTags": [
"34-aarch64",
"34",
"latest",
...
"24",
"25",
"26-modular",
...
],
"Created": "2022-11-24T13:54:18Z",
"Created": "2020-04-29T06:48:16Z",
"DockerVersion": "1.10.1",
"Labels": {
"license": "MIT",
"name": "fedora",
"vendor": "Fedora Project",
"version": "37"
"version": "32"
},
"Architecture": "amd64",
"Os": "linux",
"Layers": [
"sha256:2a0fc6bf62e155737f0ace6142ee686f3c471c1aab4241dc3128904db46288f0"
],
"LayersData": [
{
"MIMEType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"Digest": "sha256:2a0fc6bf62e155737f0ace6142ee686f3c471c1aab4241dc3128904db46288f0",
"Size": 71355009,
"Annotations": null
}
"sha256:3088721d7dbf674fc0be64cd3cf00c25aab921cacf35fa0e7b1578500a3e1653"
],
"Env": [
"DISTTAG=f37container",
"FGC=f37",
"DISTTAG=f32container",
"FGC=f32",
"container=oci"
]
}
@@ -209,7 +200,6 @@ Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate
| -------------------------------------------------- | ---------------------------------------------------------------------------------------------|
| [skopeo-copy(1)](/docs/skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](/docs/skopeo-delete.1.md) | Mark the image-name for later deletion by the registry's garbage collector. |
| [skopeo-generate-sigstore-key(1)](/docs/skopeo-generate-sigstore-key.1.md) | Generate a sigstore public/private key pair. |
| [skopeo-inspect(1)](/docs/skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
| [skopeo-list-tags(1)](/docs/skopeo-list-tags.1.md) | Return a list of tags for the transport-specific image repository. |
| [skopeo-login(1)](/docs/skopeo-login.1.md) | Login to a container registry. |
@@ -217,7 +207,7 @@ Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate
| [skopeo-manifest-digest(1)](/docs/skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](/docs/skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](/docs/skopeo-standalone-verify.1.md)| Verify an image signature. |
| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between registry repositories and local directories. |
| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between container registries and local directories. |
License
-

View File

@@ -13,8 +13,6 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/cli"
"github.com/containers/image/v5/pkg/cli/sigstore"
"github.com/containers/image/v5/signature/signer"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
encconfig "github.com/containers/ocicrypt/config"
@@ -31,7 +29,6 @@ type copyOptions struct {
additionalTags []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
signBySigstoreParamFile string // Sign the image using a sigstore signature per configuration in a param file
signBySigstorePrivateKey string // Sign the image using a sigstore private key
signPassphraseFile string // Path pointing to a passphrase file when signing (for either signature format, but only one of them)
signIdentity string // Identity of the signed image, must be a fully specified docker reference
@@ -86,7 +83,6 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
flags.BoolVar(&opts.preserveDigests, "preserve-digests", false, "Preserve digests of images and lists")
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE-IMAGE")
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
flags.StringVar(&opts.signBySigstoreParamFile, "sign-by-sigstore", "", "Sign the image using a sigstore parameter file at `PATH`")
flags.StringVar(&opts.signBySigstorePrivateKey, "sign-by-sigstore-private-key", "", "Sign the image using a sigstore private key at `PATH`")
flags.StringVar(&opts.signPassphraseFile, "sign-passphrase-file", "", "Read a passphrase for signing an image from `PATH`")
flags.StringVar(&opts.signIdentity, "sign-identity", "", "Identity of signed image, must be a fully specified docker reference. Defaults to the target docker reference.")
@@ -256,22 +252,6 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
passphrase = p
} // opts.signByFingerprint triggers a GPG-agent passphrase prompt, possibly using a more secure channel, so we usually shouldn't prompt ourselves if no passphrase was explicitly provided.
var signers []*signer.Signer
if opts.signBySigstoreParamFile != "" {
signer, err := sigstore.NewSignerFromParameterFile(opts.signBySigstoreParamFile, &sigstore.Options{
PrivateKeyPassphrasePrompt: func(keyFile string) (string, error) {
return promptForPassphrase(keyFile, os.Stdin, os.Stdout)
},
Stdin: os.Stdin,
Stdout: stdout,
})
if err != nil {
return fmt.Errorf("Error using --sign-by-sigstore: %w", err)
}
defer signer.Close()
signers = append(signers, signer)
}
var signIdentity reference.Named = nil
if opts.signIdentity != "" {
signIdentity, err = reference.ParseNamed(opts.signIdentity)
@@ -280,12 +260,9 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
}
}
opts.destImage.warnAboutIneffectiveOptions(destRef.Transport())
return retry.IfNecessary(ctx, func() error {
manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
RemoveSignatures: opts.removeSignatures,
Signers: signers,
SignBy: opts.signByFingerprint,
SignPassphrase: passphrase,
SignBySigstorePrivateKeyFile: opts.signBySigstorePrivateKey,

View File

@@ -1,90 +0,0 @@
package main
import (
"errors"
"fmt"
"io"
"io/fs"
"os"
"github.com/containers/image/v5/pkg/cli"
"github.com/containers/image/v5/signature/sigstore"
"github.com/spf13/cobra"
)
type generateSigstoreKeyOptions struct {
outputPrefix string
passphraseFile string
}
func generateSigstoreKeyCmd() *cobra.Command {
var opts generateSigstoreKeyOptions
cmd := &cobra.Command{
Use: "generate-sigstore-key [command options] --output-prefix PREFIX",
Short: "Generate a sigstore public/private key pair",
RunE: commandAction(opts.run),
Example: "skopeo generate-sigstore-key --output-prefix my-key",
}
adjustUsage(cmd)
flags := cmd.Flags()
flags.StringVar(&opts.outputPrefix, "output-prefix", "", "Write the keys to `PREFIX`.pub and `PREFIX`.private")
flags.StringVar(&opts.passphraseFile, "passphrase-file", "", "Read a passphrase for the private key from `PATH`")
return cmd
}
// ensurePathDoesNotExist verifies that path does not refer to an existing file,
// and returns an error if so.
func ensurePathDoesNotExist(path string) error {
switch _, err := os.Stat(path); {
case err == nil:
return fmt.Errorf("Refusing to overwrite existing %q", path)
case errors.Is(err, fs.ErrNotExist):
return nil
default:
return fmt.Errorf("Error checking existence of %q: %w", path, err)
}
}
func (opts *generateSigstoreKeyOptions) run(args []string, stdout io.Writer) error {
if len(args) != 0 || opts.outputPrefix == "" {
return errors.New("Usage: generate-sigstore-key --output-prefix PREFIX")
}
pubKeyPath := opts.outputPrefix + ".pub"
privateKeyPath := opts.outputPrefix + ".private"
if err := ensurePathDoesNotExist(pubKeyPath); err != nil {
return err
}
if err := ensurePathDoesNotExist(privateKeyPath); err != nil {
return err
}
var passphrase string
if opts.passphraseFile != "" {
p, err := cli.ReadPassphraseFile(opts.passphraseFile)
if err != nil {
return err
}
passphrase = p
} else {
p, err := promptForPassphrase(privateKeyPath, os.Stdin, os.Stdout)
if err != nil {
return err
}
passphrase = p
}
keys, err := sigstore.GenerateKeyPair([]byte(passphrase))
if err != nil {
return fmt.Errorf("Error generating key pair: %w", err)
}
if err := os.WriteFile(privateKeyPath, keys.PrivateKey, 0600); err != nil {
return fmt.Errorf("Error writing private key to %q: %w", privateKeyPath, err)
}
if err := os.WriteFile(pubKeyPath, keys.PublicKey, 0644); err != nil {
return fmt.Errorf("Error writing private key to %q: %w", pubKeyPath, err)
}
fmt.Fprintf(stdout, "Key written to %q and %q", privateKeyPath, pubKeyPath)
return nil
}

View File

@@ -1,79 +0,0 @@
package main
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestGenerateSigstoreKey(t *testing.T) {
// Invalid command-line arguments
for _, args := range [][]string{
{},
{"--output-prefix", "foo", "a1"},
} {
out, err := runSkopeo(append([]string{"generate-sigstore-key"}, args...)...)
assertTestFailed(t, out, err, "Usage")
}
// One of the destination files already exists
outputSuffixes := []string{".pub", ".private"}
for _, suffix := range outputSuffixes {
dir := t.TempDir()
prefix := filepath.Join(dir, "prefix")
err := os.WriteFile(prefix+suffix, []byte{}, 0600)
require.NoError(t, err)
out, err := runSkopeo("generate-sigstore-key",
"--output-prefix", prefix, "--passphrase-file", "/dev/null",
)
assertTestFailed(t, out, err, "Refusing to overwrite")
}
// One of the destinations is inaccessible (simulate by a symlink that tries to
// traverse a non-directory)
for _, suffix := range outputSuffixes {
dir := t.TempDir()
nonDirectory := filepath.Join(dir, "nondirectory")
err := os.WriteFile(nonDirectory, []byte{}, 0600)
require.NoError(t, err)
prefix := filepath.Join(dir, "prefix")
err = os.Symlink(filepath.Join(nonDirectory, "unaccessible"), prefix+suffix)
require.NoError(t, err)
out, err := runSkopeo("generate-sigstore-key",
"--output-prefix", prefix, "--passphrase-file", "/dev/null",
)
assertTestFailed(t, out, err, prefix+suffix) // + an OS-specific error message
}
destDir := t.TempDir()
// Error reading passphrase
out, err := runSkopeo("generate-sigstore-key",
"--output-prefix", filepath.Join(destDir, "prefix"),
"--passphrase-file", filepath.Join(destDir, "this-does-not-exist"),
)
assertTestFailed(t, out, err, "this-does-not-exist")
// (The interactive passphrase prompting is not yet tested)
// Error writing outputs is untested: when unit tests run as root, we can't use permissions on a directory to cause write failures,
// with the --output-prefix mechanism, and refusing to even start writing to pre-existing files, directories are the only mechanism
// we have to trigger a write failure.
// Success
// Just a smoke-test, usability of the keys is tested in the generate implementation.
dir := t.TempDir()
prefix := filepath.Join(dir, "prefix")
passphraseFile := filepath.Join(dir, "passphrase")
err = os.WriteFile(passphraseFile, []byte("some passphrase"), 0600)
require.NoError(t, err)
out, err = runSkopeo("generate-sigstore-key",
"--output-prefix", prefix, "--passphrase-file", passphraseFile,
)
assert.NoError(t, err)
for _, suffix := range outputSuffixes {
assert.Contains(t, out, prefix+suffix)
}
}

View File

@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"text/template"
@@ -17,7 +18,6 @@ import (
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/skopeo/cmd/skopeo/inspect"
"github.com/docker/distribution/registry/api/errcode"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -160,7 +160,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
} else {
row := "{{range . }}" + report.NormalizeFormat(opts.format) + "{{end}}"
data = append(data, config)
err = printTmpl(stdout, row, data)
err = printTmpl(row, data)
}
if err != nil {
return fmt.Errorf("Error writing OCI-formatted configuration data to standard output: %w", err)
@@ -186,7 +186,6 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
Architecture: imgInspect.Architecture,
Os: imgInspect.Os,
Layers: imgInspect.Layers,
LayersData: imgInspect.LayersData,
Env: imgInspect.Env,
}
outputData.Digest, err = manifest.Digest(rawManifest)
@@ -203,26 +202,12 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
outputData.RepoTags, err = docker.GetRepositoryTags(ctx, sys, img.Reference())
if err != nil {
// Some registries may decide to block the "list all tags" endpoint;
// gracefully allow the inspect to continue in this case:
fatalFailure := true
// - AWS ECR rejects it if the "ecr:ListImages" action is not allowed.
// https://github.com/containers/skopeo/issues/726
var ec errcode.ErrorCoder
if ok := errors.As(err, &ec); ok && ec.ErrorCode() == errcode.ErrorCodeDenied {
fatalFailure = false
}
// - public.ecr.aws does not implement the endpoint at all, and fails with 404:
// https://github.com/containers/skopeo/issues/1230
// This is actually "code":"NOT_FOUND", and the parser doesn't preserve that.
// So, also check the error text.
if ok := errors.As(err, &ec); ok && ec.ErrorCode() == errcode.ErrorCodeUnknown {
var e errcode.Error
if ok := errors.As(err, &e); ok && e.Code == errcode.ErrorCodeUnknown && e.Message == "404 page not found" {
fatalFailure = false
}
}
if fatalFailure {
// some registries may decide to block the "list all tags" endpoint
// gracefully allow the inspect to continue in this case. Currently
// the IBM Bluemix container registry has this restriction.
// In addition, AWS ECR rejects it with 403 (Forbidden) if the "ecr:ListImages"
// action is not allowed.
if !strings.Contains(err.Error(), "401") && !strings.Contains(err.Error(), "403") {
return fmt.Errorf("Error determining repository tags: %w", err)
}
logrus.Warnf("Registry disallows tag list retrieval; skipping")
@@ -237,14 +222,14 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
row := "{{range . }}" + report.NormalizeFormat(opts.format) + "{{end}}"
data = append(data, outputData)
return printTmpl(stdout, row, data)
return printTmpl(row, data)
}
func printTmpl(stdout io.Writer, row string, data []interface{}) error {
func printTmpl(row string, data []interface{}) error {
t, err := template.New("skopeo inspect").Parse(row)
if err != nil {
return err
}
w := tabwriter.NewWriter(stdout, 8, 2, 2, ' ', 0)
w := tabwriter.NewWriter(os.Stdout, 8, 2, 2, ' ', 0)
return t.Execute(w, data)
}

View File

@@ -3,7 +3,6 @@ package inspect
import (
"time"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
@@ -20,6 +19,5 @@ type Output struct {
Architecture string
Os string
Layers []string
LayersData []types.ImageInspectLayer
Env []string
}

View File

@@ -98,7 +98,6 @@ func createApp() (*cobra.Command, *globalOptions) {
rootCommand.AddCommand(
copyCmd(&opts),
deleteCmd(&opts),
generateSigstoreKeyCmd(),
inspectCmd(&opts),
layersCmd(&opts),
loginCmd(&opts),

View File

@@ -73,16 +73,11 @@ import (
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
ocilayout "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/pkg/blobinfocache"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
dockerdistributionerrcode "github.com/docker/distribution/registry/api/errcode"
dockerdistributionapi "github.com/docker/distribution/registry/api/v2"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -93,9 +88,7 @@ import (
// 0.2.1: Initial version
// 0.2.2: Added support for fetching image configuration as OCI
// 0.2.3: Added GetFullConfig
// 0.2.4: Added OpenImageOptional
// 0.2.5: Added LayerInfoJSON
const protocolVersion = "0.2.5"
const protocolVersion = "0.2.3"
// maxMsgSize is the current limit on a packet size.
// Note that all non-metadata (i.e. payload data) is sent over a pipe.
@@ -107,9 +100,6 @@ const maxMsgSize = 32 * 1024
// integers are above this.
const maxJSONFloat = float64(uint64(1)<<53 - 1)
// sentinelImageID represents "image not found" on the wire
const sentinelImageID = 0
// request is the JSON serialization of a function call
type request struct {
// Method is the name of the function
@@ -176,14 +166,6 @@ type proxyHandler struct {
activePipes map[uint32]*activePipe
}
// convertedLayerInfo is the reduced form of the OCI type BlobInfo
// Used in the return value of GetLayerInfo
type convertedLayerInfo struct {
Digest digest.Digest `json:"digest"`
Size int64 `json:"size"`
MediaType string `json:"media_type"`
}
// Initialize performs one-time initialization, and returns the protocol version
func (h *proxyHandler) Initialize(args []interface{}) (replyBuf, error) {
h.lock.Lock()
@@ -215,29 +197,6 @@ func (h *proxyHandler) Initialize(args []interface{}) (replyBuf, error) {
// OpenImage accepts a string image reference i.e. TRANSPORT:REF - like `skopeo copy`.
// The return value is an opaque integer handle.
func (h *proxyHandler) OpenImage(args []interface{}) (replyBuf, error) {
return h.openImageImpl(args, false)
}
// isDockerManifestUnknownError is a copy of code from containers/image,
// please update there first.
func isDockerManifestUnknownError(err error) bool {
var ec dockerdistributionerrcode.ErrorCoder
if !errors.As(err, &ec) {
return false
}
return ec.ErrorCode() == dockerdistributionapi.ErrorCodeManifestUnknown
}
// isNotFoundImageError heuristically attempts to determine whether an error
// is saying the remote source couldn't find the image (as opposed to an
// authentication error, an I/O error etc.)
// TODO drive this into containers/image properly
func isNotFoundImageError(err error) bool {
return isDockerManifestUnknownError(err) ||
errors.Is(err, ocilayout.ImageNotFoundError{})
}
func (h *proxyHandler) openImageImpl(args []interface{}, allowNotFound bool) (replyBuf, error) {
h.lock.Lock()
defer h.lock.Unlock()
var ret replyBuf
@@ -259,15 +218,9 @@ func (h *proxyHandler) openImageImpl(args []interface{}, allowNotFound bool) (re
}
imgsrc, err := imgRef.NewImageSource(context.Background(), h.sysctx)
if err != nil {
if allowNotFound && isNotFoundImageError(err) {
ret.value = sentinelImageID
return ret, nil
}
return ret, err
}
// Note that we never return zero as an imageid; this code doesn't yet
// handle overflow though.
h.imageSerial++
openimg := &openImage{
id: h.imageSerial,
@@ -279,13 +232,6 @@ func (h *proxyHandler) openImageImpl(args []interface{}, allowNotFound bool) (re
return ret, nil
}
// OpenImage accepts a string image reference i.e. TRANSPORT:REF - like `skopeo copy`.
// The return value is an opaque integer handle. If the image does not exist, zero
// is returned.
func (h *proxyHandler) OpenImageOptional(args []interface{}) (replyBuf, error) {
return h.openImageImpl(args, true)
}
func (h *proxyHandler) CloseImage(args []interface{}) (replyBuf, error) {
h.lock.Lock()
defer h.lock.Unlock()
@@ -332,9 +278,6 @@ func (h *proxyHandler) parseImageFromID(v interface{}) (*openImage, error) {
if err != nil {
return nil, err
}
if imgid == sentinelImageID {
return nil, fmt.Errorf("Invalid imageid value of zero")
}
imgref, ok := h.images[imgid]
if !ok {
return nil, fmt.Errorf("no image %v", imgid)
@@ -599,12 +542,10 @@ func (h *proxyHandler) GetBlob(args []interface{}) (replyBuf, error) {
piper, f, err := h.allocPipe()
if err != nil {
blobr.Close()
return ret, err
}
go func() {
// Signal completion when we return
defer blobr.Close()
defer f.wg.Done()
verifier := d.Verifier()
tr := io.TeeReader(blobr, verifier)
@@ -627,56 +568,6 @@ func (h *proxyHandler) GetBlob(args []interface{}) (replyBuf, error) {
return ret, nil
}
// GetLayerInfo returns data about the layers of an image, useful for reading the layer contents.
//
// This needs to be called since the data returned by GetManifest() does not allow correctly
// calling GetBlob() for the containers-storage: transport (which doesn't store the original compressed
// representations referenced in the manifest).
func (h *proxyHandler) GetLayerInfo(args []interface{}) (replyBuf, error) {
h.lock.Lock()
defer h.lock.Unlock()
var ret replyBuf
if h.sysctx == nil {
return ret, fmt.Errorf("client error: must invoke Initialize")
}
if len(args) != 1 {
return ret, fmt.Errorf("found %d args, expecting (imgid)", len(args))
}
imgref, err := h.parseImageFromID(args[0])
if err != nil {
return ret, err
}
ctx := context.TODO()
err = h.cacheTargetManifest(imgref)
if err != nil {
return ret, err
}
img := imgref.cachedimg
layerInfos, err := img.LayerInfosForCopy(ctx)
if err != nil {
return ret, err
}
if layerInfos == nil {
layerInfos = img.LayerInfos()
}
var layers []convertedLayerInfo
for _, layer := range layerInfos {
layers = append(layers, convertedLayerInfo{layer.Digest, layer.Size, layer.MediaType})
}
ret.value = layers
return ret, nil
}
// FinishPipe waits for the worker goroutine to finish, and closes the write side of the pipe.
func (h *proxyHandler) FinishPipe(args []interface{}) (replyBuf, error) {
h.lock.Lock()
@@ -705,17 +596,6 @@ func (h *proxyHandler) FinishPipe(args []interface{}) (replyBuf, error) {
return ret, err
}
// close releases all resources associated with this proxy backend
func (h *proxyHandler) close() {
for _, image := range h.images {
err := image.src.Close()
if err != nil {
// This shouldn't be fatal
logrus.Warnf("Failed to close image %s: %v", transports.ImageName(image.cachedimg.Reference()), err)
}
}
}
// send writes a reply buffer to the socket
func (buf replyBuf) send(conn *net.UnixConn, err error) error {
replyToSerialize := reply{
@@ -798,8 +678,6 @@ func (h *proxyHandler) processRequest(readBytes []byte) (rb replyBuf, terminate
rb, err = h.Initialize(req.Args)
case "OpenImage":
rb, err = h.OpenImage(req.Args)
case "OpenImageOptional":
rb, err = h.OpenImageOptional(req.Args)
case "CloseImage":
rb, err = h.CloseImage(req.Args)
case "GetManifest":
@@ -810,14 +688,10 @@ func (h *proxyHandler) processRequest(readBytes []byte) (rb replyBuf, terminate
rb, err = h.GetFullConfig(req.Args)
case "GetBlob":
rb, err = h.GetBlob(req.Args)
case "GetLayerInfo":
rb, err = h.GetLayerInfo(req.Args)
case "FinishPipe":
rb, err = h.FinishPipe(req.Args)
case "Shutdown":
terminate = true
// NOTE: If you add a method here, you should very likely be bumping the
// const protocolVersion above.
default:
err = fmt.Errorf("unknown method: %s", req.Method)
}
@@ -831,7 +705,6 @@ func (opts *proxyOptions) run(args []string, stdout io.Writer) error {
images: make(map[uint32]*openImage),
activePipes: make(map[uint32]*activePipe),
}
defer handler.close()
// Convert the socket FD passed by client into a net.FileConn
fd := os.NewFile(uintptr(opts.sockFd), "sock")

View File

@@ -19,8 +19,6 @@ import (
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/cli"
"github.com/containers/image/v5/pkg/cli/sigstore"
"github.com/containers/image/v5/signature/signer"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
@@ -38,7 +36,6 @@ type syncOptions struct {
retryOpts *retry.Options
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
signBySigstoreParamFile string // Sign the image using a sigstore signature per configuration in a param file
signBySigstorePrivateKey string // Sign the image using a sigstore private key
signPassphraseFile string // Path pointing to a passphrase file when signing
format commonFlag.OptionalString // Force conversion of the image to a specified format
@@ -49,7 +46,6 @@ type syncOptions struct {
dryRun bool // Don't actually copy anything, just output what it would have done
preserveDigests bool // Preserve digests during sync
keepGoing bool // Whether or not to abort the sync if there are any errors during syncing the images
appendSuffix string // Suffix to append to destination image tag
}
// repoDescriptor contains information of a single repository used as a sync source.
@@ -110,14 +106,12 @@ See skopeo-sync(1) for details.
flags := cmd.Flags()
flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE images")
flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
flags.StringVar(&opts.signBySigstoreParamFile, "sign-by-sigstore", "", "Sign the image using a sigstore parameter file at `PATH`")
flags.StringVar(&opts.signBySigstorePrivateKey, "sign-by-sigstore-private-key", "", "Sign the image using a sigstore private key at `PATH`")
flags.StringVar(&opts.signPassphraseFile, "sign-passphrase-file", "", "File that contains a passphrase for the --sign-by key")
flags.VarP(commonFlag.NewOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source, with fallbacks)`)
flags.StringVarP(&opts.source, "src", "s", "", "SOURCE transport type")
flags.StringVarP(&opts.destination, "dest", "d", "", "DESTINATION transport type")
flags.BoolVar(&opts.scoped, "scoped", false, "Images at DESTINATION are prefix using the full source image path as scope")
flags.StringVar(&opts.appendSuffix, "append-suffix", "", "String to append to DESTINATION tags")
flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
flags.BoolVar(&opts.dryRun, "dry-run", false, "Run without actually copying data")
flags.BoolVar(&opts.preserveDigests, "preserve-digests", false, "Preserve digests of images and lists")
@@ -222,7 +216,15 @@ func getImageTags(ctx context.Context, sysCtx *types.SystemContext, repoRef refe
}
tags, err := docker.GetRepositoryTags(ctx, sysCtx, dockerRef)
if err != nil {
return nil, fmt.Errorf("Error determining repository tags for repo %s: %w", name, err)
var unauthorizedForCredentials docker.ErrUnauthorizedForCredentials
if errors.As(err, &unauthorizedForCredentials) {
// Some registries may decide to block the "list all tags" endpoint.
// Gracefully allow the sync to continue in this case.
logrus.Warnf("Registry disallows tag list retrieval: %s", err)
tags = nil
} else {
return nil, fmt.Errorf("Error determining repository tags for image %s: %w", name, err)
}
}
return tags, nil
@@ -242,11 +244,7 @@ func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]
for _, tag := range tags {
taggedRef, err := reference.WithTag(repoRef, tag)
if err != nil {
logrus.WithFields(logrus.Fields{
"repo": repoRef.Name(),
"tag": tag,
}).Errorf("Error creating a tagged reference from registry tag list: %v", err)
continue
return nil, fmt.Errorf("Error creating a reference for repository %s and tag %q: %w", repoRef.Name(), tag, err)
}
ref, err := docker.NewReference(taggedRef)
if err != nil {
@@ -550,8 +548,6 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
return errors.New("sync from 'dir' to 'dir' not implemented, consider using rsync instead")
}
opts.destImage.warnAboutIneffectiveOptions(transports.Get(opts.destination))
imageListSelection := copy.CopySystemImage
if opts.all {
imageListSelection = copy.CopyAllImages
@@ -608,31 +604,13 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
}
passphrase = p
}
var signers []*signer.Signer
if opts.signBySigstoreParamFile != "" {
signer, err := sigstore.NewSignerFromParameterFile(opts.signBySigstoreParamFile, &sigstore.Options{
PrivateKeyPassphrasePrompt: func(keyFile string) (string, error) {
return promptForPassphrase(keyFile, os.Stdin, os.Stdout)
},
Stdin: os.Stdin,
Stdout: stdout,
})
if err != nil {
return fmt.Errorf("Error using --sign-by-sigstore: %w", err)
}
defer signer.Close()
signers = append(signers, signer)
}
options := copy.Options{
RemoveSignatures: opts.removeSignatures,
Signers: signers,
SignBy: opts.signByFingerprint,
SignPassphrase: passphrase,
SignBySigstorePrivateKeyFile: opts.signBySigstorePrivateKey,
SignSigstorePrivateKeyPassphrase: []byte(passphrase),
ReportWriter: stdout,
ReportWriter: os.Stdout,
DestinationCtx: destinationCtx,
ImageListSelection: imageListSelection,
PreserveDigests: opts.preserveDigests,
@@ -666,7 +644,7 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
destSuffix = path.Base(destSuffix)
}
destRef, err := destinationReference(path.Join(destination, destSuffix)+opts.appendSuffix, opts.destination)
destRef, err := destinationReference(path.Join(destination, destSuffix), opts.destination)
if err != nil {
return err
}

View File

@@ -10,7 +10,6 @@ import (
commonFlag "github.com/containers/common/pkg/flag"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports/alltransports"
@@ -31,11 +30,11 @@ type errorShouldDisplayUsage struct {
// The error for closeErr is annotated with description (which is not a format string)
// Typical usage:
//
// defer func() {
// if err := something.Close(); err != nil {
// returnedErr = noteCloseFailure(returnedErr, "closing something", err)
// }
// }
// defer func() {
// if err := something.Close(); err != nil {
// returnedErr = noteCloseFailure(returnedErr, "closing something", err)
// }
// }
func noteCloseFailure(err error, description string, closeErr error) error {
// We don't accept a Closer() and close it ourselves because signature.PolicyContext has .Destroy(), not .Close().
// This also makes it harder for a caller to do
@@ -245,7 +244,6 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
}
// imageDestOptions is a superset of imageOptions specialized for image destinations.
// Every user should call imageDestOptions.warnAboutIneffectiveOptions() as part of handling the CLI
type imageDestOptions struct {
*imageOptions
dirForceCompression bool // Compress layers when saving to the dir: transport
@@ -254,13 +252,12 @@ type imageDestOptions struct {
compressionFormat string // Format to use for the compression
compressionLevel commonFlag.OptionalInt // Level to use for the compression
precomputeDigests bool // Precompute digests to dedup layers when saving to the docker: transport
imageDestFlagPrefix string
}
// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageDestOptions) {
genericFlags, genericOptions := imageFlags(global, shared, deprecatedTLSVerify, flagPrefix, credsOptionAlias)
opts := imageDestOptions{imageOptions: genericOptions, imageDestFlagPrefix: flagPrefix}
opts := imageDestOptions{imageOptions: genericOptions}
fs := pflag.FlagSet{}
fs.AddFlagSet(&genericFlags)
fs.BoolVar(&opts.dirForceCompression, flagPrefix+"compress", false, "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
@@ -298,19 +295,6 @@ func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
return ctx, err
}
// warnAboutIneffectiveOptions warns if any ineffective option was set by the user
// Every user should call this as part of handling the CLI
func (opts *imageDestOptions) warnAboutIneffectiveOptions(destTransport types.ImageTransport) {
if destTransport.Name() != directory.Transport.Name() {
if opts.dirForceCompression {
logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"compress")
}
if opts.dirForceDecompression {
logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"decompress")
}
}
}
func parseCreds(creds string) (string, string, error) {
if creds == "" {
return "", "", errors.New("credentials can't be empty")

View File

@@ -1,15 +0,0 @@
ARG BASE_FQIN=quay.io/coreos-assembler/fcos-buildroot:testing-devel
FROM $BASE_FQIN
# See 'Danger of using COPY and ADD instructions'
# at https://cirrus-ci.org/guide/docker-builder-vm/#dockerfile-as-a-ci-environment
# Provide easy way to force-invalidate image cache by .cirrus.yml change
ARG CIRRUS_IMAGE_VERSION
ENV CIRRUS_IMAGE_VERSION=$CIRRUS_IMAGE_VERSION
ADD https://sh.rustup.rs /var/tmp/rustup_installer.sh
RUN dnf erase -y rust && \
chmod +x /var/tmp/rustup_installer.sh && \
/var/tmp/rustup_installer.sh -y --default-toolchain stable --profile minimal
ENV PATH=/root/.cargo/bin:/root/.local/bin:/root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

View File

@@ -6,17 +6,6 @@
set -e
_EOL=20270501
if [[ $(date +%Y%m%d) -ge $_EOL ]]; then
die "As of $_EOL this branch is probably
no longer supported in RHEL 9.2/8.8, please
confirm this with RHEL Program Management. If so:
It should be removed from Cirrus-Cron,
the .cirrus.yml file removed, and
the VM images (manually) unmarked
'permanent=true'"
fi
# BEGIN Global export of all variables
set -a
@@ -66,6 +55,9 @@ _run_setup() {
# VM's come with the distro. skopeo package pre-installed
dnf erase -y skopeo
# Required for testing the SIF transport
dnf install -y fakeroot squashfs-tools
msg "Removing systemd-resolved from nsswitch.conf"
# /etc/resolv.conf is already set to bypass systemd-resolvd
sed -i -r -e 's/^(hosts.+)resolve.+dns/\1dns/' /etc/nsswitch.conf
@@ -123,19 +115,18 @@ _run_unit() {
make test-unit-local BUILDTAGS="$BUILDTAGS"
}
_podman_reset() {
# Ensure we start with a clean-slate
showrun podman system reset --force
}
_run_integration() {
_podman_reset
# Ensure we start with a clean-slate
podman system reset --force
make test-integration-local BUILDTAGS="$BUILDTAGS"
}
_run_system() {
_podman_reset
##### Note: Test MODIFIES THE HOST SETUP #####
# Ensure we start with a clean-slate
podman system reset --force
# Executes with containers required for testing.
make test-system-local BUILDTAGS="$BUILDTAGS"
}

View File

@@ -1,17 +1,4 @@
[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
[comment]: <> ()
[comment]: <> (ANY changes made to this file, once committed/merged must)
[comment]: <> (be manually copy/pasted -in markdown- into the description)
[comment]: <> (field on Quay at the following locations:)
[comment]: <> ()
[comment]: <> (https://quay.io/repository/containers/skopeo)
[comment]: <> (https://quay.io/repository/skopeo/stable)
[comment]: <> (https://quay.io/repository/skopeo/testing)
[comment]: <> (https://quay.io/repository/skopeo/upstream)
[comment]: <> ()
[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
<img src="https://cdn.rawgit.com/containers/skopeo/main/docs/skopeo.svg" width="250">
<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">
----

View File

@@ -16,11 +16,27 @@ FROM registry.fedoraproject.org/fedora:latest
# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
RUN dnf -y update && \
rpm --setcaps shadow-utils 2>/dev/null && \
dnf -y install 'dnf-command(copr)' --enablerepo=updates-testing && \
dnf -y copr enable rhcontainerbot/podman-next && \
dnf -y install skopeo \
--exclude container-selinux \
--enablerepo=updates-testing && \
dnf -y --enablerepo updates-testing --exclude container-selinux install \
make \
golang \
git \
go-md2man \
fuse-overlayfs \
fuse3 \
containers-common \
gpgme-devel \
libassuan-devel \
btrfs-progs-devel \
device-mapper-devel && \
mkdir /root/skopeo && \
git clone https://github.com/containers/skopeo \
/root/skopeo/src/github.com/containers/skopeo && \
export GOPATH=/root/skopeo && \
cd /root/skopeo/src/github.com/containers/skopeo && \
make bin/skopeo && \
make PREFIX=/usr install && \
rm -rf /root/skopeo/* && \
dnf -y remove git golang go-md2man make && \
dnf clean all && \
rm -rf /var/cache /var/log/dnf* /var/log/yum.*

View File

@@ -1,8 +1,8 @@
# This is a default registries.d configuration file. You may
# add to this file or create additional files in registries.d/.
#
# lookaside: for reading/writing simple signing signatures
# lookaside-staging: for writing simple signing signatures, preferred over lookaside
# lookaside: indicates a location that is read and write
# lookaside-staging: indicates a location that is only for write
#
# lookaside and lookaside-staging take a value of the following:
# lookaside: {schema}://location
@@ -10,12 +10,10 @@
# For reading signatures, schema may be http, https, or file.
# For writing signatures, schema may only be file.
# The default locations are built-in, for both reading and writing:
# /var/lib/containers/sigstore for root, or
# ~/.local/share/containers/sigstore for non-root users.
# This is the default signature write location for docker registries.
default-docker:
# lookaside: https://…
# lookaside-staging: file:///…
# lookaside: file:///var/lib/containers/sigstore
lookaside-staging: file:///var/lib/containers/sigstore
# The 'docker' indicator here is the start of the configuration
# for docker registries.
@@ -23,6 +21,6 @@ default-docker:
# docker:
#
# privateregistry.com:
# lookaside: https://privateregistry.com/sigstore/
# lookaside: http://privateregistry.com/sigstore/
# lookaside-staging: /mnt/nfs/privateregistry/sigstore

View File

@@ -56,7 +56,7 @@ After copying the image, write the digest of the resulting image to the file.
**--preserve-digests**
Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.
Preserve the digests during copying. Fail if the digest cannot be preserved.
**--encrypt-layer** _ints_
@@ -93,11 +93,6 @@ Do not copy signatures, if any, from _source-image_. Necessary when copying a si
Add a “simple signing” signature using that key ID for an image name corresponding to _destination-image_
**--sign-by-sigstore** _param-file_
Add a sigstore signature based on the options in the specified containers sigstore signing parameter file, _param-file_.
See containers-sigstore-signing-params.yaml(5) for details about the file format.
**--sign-by-sigstore-private-key** _path_
Add a sigstore signature using a private key at _path_ for an image name corresponding to _destination-image_
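For illustration, a hedged sketch of the private-key flow described above (the key and passphrase file names are hypothetical):

```console
$ skopeo copy --sign-by-sigstore-private-key ./mykey.private \
      --sign-passphrase-file ./mykey-passphrase \
      docker://registry.example.com/busybox:latest docker://registry.example.com/busybox:signed
```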
@@ -219,12 +214,12 @@ The password to access the destination registry.
## EXAMPLES
To just copy an image from one registry to another:
```console
```sh
$ skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest
```
To copy the layers of the docker.io busybox image to a local directory:
```console
```sh
$ mkdir -p /var/lib/images/busybox
$ skopeo copy docker://busybox:latest dir:/var/lib/images/busybox
$ ls /var/lib/images/busybox/*
@@ -233,46 +228,42 @@ $ ls /var/lib/images/busybox/*
/tmp/busybox/8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
```
To create an archive consumable by `docker load` (but note that using a registry is almost always more efficient):
```console
$ skopeo copy docker://busybox:latest docker-archive:archive-file.tar:busybox:latest
```
To copy and sign an image:
```console
$ skopeo copy --sign-by dev@example.com containers-storage:example/busybox:streaming docker://example/busybox:gold
```sh
# skopeo copy --sign-by dev@example.com containers-storage:example/busybox:streaming docker://example/busybox:gold
```
To encrypt an image:
```console
$ skopeo copy docker://docker.io/library/nginx:1.17.8 oci:local_nginx:1.17.8
```sh
skopeo copy docker://docker.io/library/nginx:1.17.8 oci:local_nginx:1.17.8
$ openssl genrsa -out private.key 1024
$ openssl rsa -in private.key -pubout > public.key
openssl genrsa -out private.key 1024
openssl rsa -in private.key -pubout > public.key
$ skopeo copy --encryption-key jwe:./public.key oci:local_nginx:1.17.8 oci:try-encrypt:encrypted
skopeo copy --encryption-key jwe:./public.key oci:local_nginx:1.17.8 oci:try-encrypt:encrypted
```
To decrypt an image:
```console
$ skopeo copy --decryption-key ./private.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
```sh
skopeo copy --decryption-key ./private.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
```
To copy encrypted image without decryption:
```console
$ skopeo copy oci:try-encrypt:encrypted oci:try-encrypt-copy:encrypted
```sh
skopeo copy oci:try-encrypt:encrypted oci:try-encrypt-copy:encrypted
```
To decrypt an image that requires more than one key:
```console
$ skopeo copy --decryption-key ./private1.key --decryption-key ./private2.key --decryption-key ./private3.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
```sh
skopeo copy --decryption-key ./private1.key --decryption-key ./private2.key --decryption-key ./private3.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
```
Container images can also be partially encrypted by specifying the index of the layer. Layers are 0-indexed, with support for negative indexing: 0 is the first layer and -1 is the last layer.
For example, if the image `docker.io/library/nginx:1.17.8` is made up of 3 layers and we only want to encrypt the 2nd layer:
```console
$ skopeo copy --encryption-key jwe:./public.key --encrypt-layer 1 oci:local_nginx:1.17.8 oci:try-encrypt:encrypted
```sh
skopeo copy --encryption-key jwe:./public.key --encrypt-layer 1 oci:local_nginx:1.17.8 oci:try-encrypt:encrypted
```
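Negative indices follow the same rule; a sketch encrypting only the last layer of the same image:
```console
$ skopeo copy --encryption-key jwe:./public.key --encrypt-layer -1 oci:local_nginx:1.17.8 oci:try-encrypt:encrypted
```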
## SEE ALSO

View File

@@ -85,7 +85,7 @@ The password to access the registry.
## EXAMPLES
Mark image example/pause for deletion from the registry.example.com registry:
```console
```sh
$ skopeo delete docker://registry.example.com/example/pause:latest
```
See above for additional details on using the command **delete**.

View File

@@ -1,47 +0,0 @@
% skopeo-generate-sigstore-key(1)
## NAME
skopeo\-generate-sigstore-key - Generate a sigstore public/private key pair.
## SYNOPSIS
**skopeo generate-sigstore-key** [*options*] **--output-prefix** _prefix_
## DESCRIPTION
Generates a public/private key pair suitable for creating sigstore image signatures.
The private key is encrypted with a passphrase;
if one is not provided using an option, this command prompts for it interactively.
The private key is written to _prefix_**.private**.
The public key is written to _prefix_**.pub**.
## OPTIONS
**--help**, **-h**
Print usage statement
**--output-prefix** _prefix_
Mandatory.
Path prefix for the output keys (_prefix_**.private** and _prefix_**.pub**).
**--passphrase-file** _path_
The passphrase to use to encrypt the private key.
Only the first line will be read.
A passphrase stored in a file is of questionable security if other users can read this file.
Do not use this option if at all avoidable.
## EXAMPLES
```console
$ skopeo generate-sigstore-key --output-prefix mykey
```
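For unattended use, the passphrase can be read from a file via `--passphrase-file` (a sketch; `passphrase.txt` is an illustrative file name, and note the caveat above about file-stored passphrases):
```console
$ skopeo generate-sigstore-key --output-prefix mykey --passphrase-file ./passphrase.txt
```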
# SEE ALSO
skopeo(1), skopeo-copy(1), containers-policy.json(5)
## AUTHORS
Miloslav Trmač <mitr@redhat.com>

View File

@@ -87,90 +87,74 @@ Do not list the available tags from the repository in the output. When `true`, t
## EXAMPLES
To review information for the image fedora from the docker.io registry:
```console
```sh
$ skopeo inspect docker://docker.io/fedora
{
"Name": "docker.io/library/fedora",
"Digest": "sha256:f99efcddc4dd6736d8a88cc1ab6722098ec1d77dbf7aed9a7a514fc997ca08e0",
"Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
"RepoTags": [
"20",
"21",
"..."
"20",
"21",
"22",
"23",
"24",
"heisenbug",
"latest",
"rawhide"
],
"Created": "2022-11-16T07:26:42.618327645Z",
"DockerVersion": "20.10.12",
"Labels": {
"maintainer": "Clement Verna \u003ccverna@fedoraproject.org\u003e"
},
"Created": "2016-06-20T19:33:43.220526898Z",
"DockerVersion": "1.10.3",
"Labels": {},
"Architecture": "amd64",
"Os": "linux",
"Layers": [
"sha256:cb8b1ed77979b894115a983f391465651aa7eb3edd036be4b508eea47271eb93"
],
"LayersData": [
{
"MIMEType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"Digest": "sha256:cb8b1ed77979b894115a983f391465651aa7eb3edd036be4b508eea47271eb93",
"Size": 65990920,
"Annotations": null
}
],
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"DISTTAG=f37container",
"FGC=f37",
"FBR=f37"
"sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
]
}
```
To inspect python from the docker.io registry and not show the available tags:
```console
```sh
$ skopeo inspect --no-tags docker://docker.io/library/python
{
"Name": "docker.io/library/python",
"Digest": "sha256:10fc14aa6ae69f69e4c953cffd9b0964843d8c163950491d2138af891377bc1d",
"Digest": "sha256:5ca194a80ddff913ea49c8154f38da66a41d2b73028c5cf7e46bc3c1d6fda572",
"RepoTags": [],
"Created": "2022-11-16T06:55:28.566254104Z",
"DockerVersion": "20.10.12",
"Created": "2021-10-05T23:40:54.936108045Z",
"DockerVersion": "20.10.7",
"Labels": null,
"Architecture": "amd64",
"Os": "linux",
"Layers": [
"sha256:a8ca11554fce00d9177da2d76307bdc06df7faeb84529755c648ac4886192ed1",
"sha256:e4e46864aba2e62ba7c75965e4aa33ec856ee1b1074dda6b478101c577b63abd",
"..."
],
"LayersData": [
{
"MIMEType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"Digest": "sha256:a8ca11554fce00d9177da2d76307bdc06df7faeb84529755c648ac4886192ed1",
"Size": 55038615,
"Annotations": null
},
{
"MIMEType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"Digest": "sha256:e4e46864aba2e62ba7c75965e4aa33ec856ee1b1074dda6b478101c577b63abd",
"Size": 5164893,
"Annotations": null
},
"..."
"sha256:df5590a8898bedd76f02205dc8caa5cc9863267dbcd8aac038bcd212688c1cc7",
"sha256:705bb4cb554eb7751fd21a994f6f32aee582fbe5ea43037db6c43d321763992b",
"sha256:519df5fceacdeaadeec563397b1d9f4d7c29c9f6eff879739cab6f0c144f49e1",
"sha256:ccc287cbeddc96a0772397ca00ec85482a7b7f9a9fac643bfddd87b932f743db",
"sha256:e3f8e6af58ed3a502f0c3c15dce636d9d362a742eb5b67770d0cfcb72f3a9884",
"sha256:aebed27b2d86a5a3a2cbe186247911047a7e432b9d17daad8f226597c0ea4276",
"sha256:54c32182bdcc3041bf64077428467109a70115888d03f7757dcf614ff6d95ebe",
"sha256:cc8b7caedab13af07adf4836e13af2d4e9e54d794129b0fd4c83ece6b1112e86",
"sha256:462c3718af1d5cdc050cfba102d06c26f78fe3b738ce2ca2eb248034b1738945"
],
"Env": [
"PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"LANG=C.UTF-8",
"...",
"GPG_KEY=A035C8C19219BA821ECEA86B64E628F8D684696D",
"PYTHON_VERSION=3.10.0",
"PYTHON_PIP_VERSION=21.2.4",
"PYTHON_SETUPTOOLS_VERSION=57.5.0",
"PYTHON_GET_PIP_URL=https://github.com/pypa/get-pip/raw/d781367b97acf0ece7e9e304bf281e99b618bf10/public/get-pip.py",
"PYTHON_GET_PIP_SHA256=01249aa3e58ffb3e1686b7141b4e9aac4d398ef4ac3012ed9dff8dd9f685ffe0"
]
}
```
```console
```
$ /bin/skopeo inspect --config docker://registry.fedoraproject.org/fedora --format "{{ .Architecture }}"
amd64
```
```console
```
$ /bin/skopeo inspect --format '{{ .Env }}' docker://registry.access.redhat.com/ubi8
[PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin container=oci]
```
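The same Go-template syntax works for any top-level field of the default output shown above; for example, to print only the digest (output elided):
```console
$ skopeo inspect --format '{{ .Digest }}' docker://registry.fedoraproject.org/fedora
```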

View File

@@ -79,7 +79,7 @@ This commands refers to repositories using a _transport_`:`_details_ format. The
### Docker Transport
To get the list of tags in the "fedora" repository from the docker.io registry (the repository name expands to "library/fedora" per docker transport canonical form):
```console
```sh
$ skopeo list-tags docker://docker.io/fedora
{
"Repository": "docker.io/library/fedora",
@@ -110,7 +110,7 @@ $ skopeo list-tags docker://docker.io/fedora
To list the tags in a local host docker/distribution registry on port 5000, in this case for the "fedora" repository:
```console
```sh
$ skopeo list-tags docker://localhost:5000/fedora
{
"Repository": "localhost:5000/fedora",
@@ -127,7 +127,7 @@ $ skopeo list-tags docker://localhost:5000/fedora
To list the tags in a local docker-archive file:
```console
```sh
$ skopeo list-tags docker-archive:/tmp/busybox.tar.gz
{
"Tags": [
@@ -138,7 +138,7 @@ $ skopeo list-tags docker-archive:/tmp/busybox.tar.gz
Also supports more than one tag in an archive:
```console
```sh
$ skopeo list-tags docker-archive:/tmp/docker-two-images.tar.gz
{
"Tags": [
@@ -150,7 +150,7 @@ $ skopeo list-tags docker-archive:/tmp/docker-two-images.tar.gz
Will include a source-index entry for each untagged image:
```console
```sh
$ skopeo list-tags docker-archive:/tmp/four-tags-with-an-untag.tar
{
"Tags": [

View File

@@ -57,41 +57,41 @@ Write more detailed information to stdout
## EXAMPLES
```console
```
$ skopeo login docker.io
Username: testuser
Password:
Login Succeeded!
```
```console
```
$ skopeo login -u testuser -p testpassword localhost:5000
Login Succeeded!
```
```console
```
$ skopeo login --authfile authdir/myauths.json docker.io
Username: testuser
Password:
Login Succeeded!
```
```console
```
$ skopeo login --tls-verify=false -u test -p test localhost:5000
Login Succeeded!
```
```console
```
$ skopeo login --cert-dir /etc/containers/certs.d/ -u foo -p bar localhost:5000
Login Succeeded!
```
```console
```
$ skopeo login -u testuser --password-stdin < testpassword.txt docker.io
Login Succeeded!
```
```console
```
$ echo $testpassword | skopeo login -u testuser --password-stdin docker.io
Login Succeeded!
```

View File

@@ -35,17 +35,17 @@ Require HTTPS and verify certificates when talking to the container registry or
## EXAMPLES
```console
```
$ skopeo logout docker.io
Remove login credentials for docker.io
```
```console
```
$ skopeo logout --authfile authdir/myauths.json docker.io
Remove login credentials for docker.io
```
```console
```
$ skopeo logout --all
Remove login credentials for all registries
```

View File

@@ -18,7 +18,7 @@ Print usage statement
## EXAMPLES
```console
```sh
$ skopeo manifest-digest manifest.json
sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6
```
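The manifest does not have to come from a local build; a sketch fetching one from a registry with `skopeo inspect --raw` and then computing its digest:
```console
$ skopeo inspect --raw docker://quay.io/skopeo/stable:latest > /tmp/manifest.json
$ skopeo manifest-digest /tmp/manifest.json
```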

View File

@@ -31,7 +31,7 @@ The passphare to use when signing with the key ID from `--sign-by`. Only the fir
## EXAMPLES
```console
```sh
$ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 --output busybox.signature
$
```

View File

@@ -30,7 +30,7 @@ Print usage statement
## EXAMPLES
```console
```sh
$ skopeo standalone-verify busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 busybox.signature
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```

View File

@@ -1,14 +1,14 @@
% skopeo-sync(1)
## NAME
skopeo\-sync - Synchronize images between registry repositories and local directories.
skopeo\-sync - Synchronize images between container registries and local directories.
## SYNOPSIS
**skopeo sync** [*options*] --src _transport_ --dest _transport_ _source_ _destination_
## DESCRIPTION
Synchronize images between registry repositories and local directories.
Synchronize images between container registries and local directories.
The synchronization is achieved by copying all the images found at _source_ to _destination_.
Useful to synchronize a local container registry mirror, and to populate registries running inside of air-gapped environments.
@@ -66,9 +66,7 @@ Print usage statement.
**--scoped** Prefix images with the source image path, so that multiple images with the same name can be stored at _destination_.
**--append-suffix** _tag-suffix_ String to append to destination tags.
**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.
**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved.
**--remove-signatures** Do not copy signatures, if any, from _source-image_. This is necessary when copying a signed image to a destination which does not support signatures.
@@ -76,11 +74,6 @@ Print usage statement.
Add a “simple signing” signature using that key ID for an image name corresponding to _destination-image_
**--sign-by-sigstore** _param-file_
Add a sigstore signature based on the options in the specified containers sigstore signing parameter file, _param-file_.
See containers-sigstore-signing-params.yaml(5) for details about the file format.
**--sign-by-sigstore-private-key** _path_
Add a sigstore signature using a private key at _path_ for an image name corresponding to _destination-image_
@@ -133,7 +126,7 @@ The password to access the destination registry.
## EXAMPLES
### Synchronizing to a local directory
```console
```
$ skopeo sync --src docker --dest dir registry.example.com/busybox /media/usb
```
Images are located at:
@@ -151,7 +144,7 @@ Images are located at:
/media/usb/busybox:1-glibc
```
Sync run
```console
```
$ skopeo sync --src dir --dest docker /media/usb/busybox:1-glibc my-registry.local.lan/test/
```
Destination registry content:
@@ -161,7 +154,7 @@ my-registry.local.lan/test/busybox 1-glibc
```
### Synchronizing to a local directory, scoped
```console
```
$ skopeo sync --src docker --dest dir --scoped registry.example.com/busybox /media/usb
```
Images are located at:
@@ -174,8 +167,8 @@ Images are located at:
```
### Synchronizing to a container registry
```console
$ skopeo sync --src docker --dest docker registry.example.com/busybox my-registry.local.lan
```
skopeo sync --src docker --dest docker registry.example.com/busybox my-registry.local.lan
```
Destination registry content:
```
@@ -184,8 +177,8 @@ registry.local.lan/busybox 1-glibc, 1-musl, 1-ubuntu, ..., latest
```
### Synchronizing to a container registry keeping the repository
```console
$ skopeo sync --src docker --dest docker registry.example.com/repo/busybox my-registry.local.lan/repo
```
skopeo sync --src docker --dest docker registry.example.com/repo/busybox my-registry.local.lan/repo
```
Destination registry content:
```
@@ -193,16 +186,6 @@ REPO TAGS
registry.local.lan/repo/busybox 1-glibc, 1-musl, 1-ubuntu, ..., latest
```
### Synchronizing to a container registry with tag suffix
```console
$ skopeo sync --src docker --dest docker --append-suffix '-mirror' registry.example.com/busybox my-registry.local.lan
```
Destination registry content:
```
REPO TAGS
registry.local.lan/busybox 1-glibc-mirror, 1-musl-mirror, 1-ubuntu-mirror, ..., latest-mirror
```
### YAML file content (used _source_ for `**--src yaml**`)
```yaml
@@ -227,8 +210,8 @@ quay.io:
- latest
```
If the yaml filename is `sync.yml`, sync run:
```console
$ skopeo sync --src yaml --dest docker sync.yml my-registry.local.lan/repo/
```
skopeo sync --src yaml --dest docker sync.yml my-registry.local.lan/repo/
```
This will copy the following images:
- Repository `registry.example.com/busybox`: all images, as no tags are specified.
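For reference, a minimal `sync.yml` sketch matching the structure described above (host names, repositories and tags are illustrative):
```yaml
registry.example.com:
  images:
    busybox: []      # no tags listed: sync every tag in the repository
    redis:
      - "1.0"        # sync only this tag
```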

View File

@@ -47,7 +47,7 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
**oci-archive:**_path_**:**_tag_
An image _tag_ in a tar archive compliant with "Open Container Image Layout Specification" at _path_.
See [containers-transports(5)](https://github.com/containers/image/blob/main/docs/containers-transports.5.md) for details.
See [containers-transports(5)](https://github.com/containers/image/blob/master/docs/containers-transports.5.md) for details.
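For instance, a sketch copying an image into such an archive using the _path_`:`_tag_ syntax:
```console
$ skopeo copy docker://quay.io/skopeo/stable:latest oci-archive:/tmp/skopeo.tar:stable
```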
## OPTIONS
@@ -101,7 +101,6 @@ Print the version number
| ----------------------------------------- | ------------------------------------------------------------------------------ |
| [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark the _image-name_ for later deletion by the registry's garbage collector. |
| [skopeo-generate-sigstore-key(1)](skopeo-generate-sigstore-key.1.md) | Generate a sigstore public/private key pair. |
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about _image-name_ in a registry. |
| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List image names in a transport-specific collection of images.|
| [skopeo-login(1)](skopeo-login.1.md) | Login to a container registry. |
@@ -109,16 +108,16 @@ Print the version number
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image signature. |
| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between registry repositories and local directories. |
| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between container registries and local directories. |
## FILES
**/etc/containers/policy.json**
Default trust policy file, if **--policy** is not specified.
The policy format is documented in [containers-policy.json(5)](https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md) .
The policy format is documented in [containers-policy.json(5)](https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md) .
**/etc/containers/registries.d**
Default directory containing registry configuration, if **--registries.d** is not specified.
The contents of this directory are documented in [containers-policy.json(5)](https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md).
The contents of this directory are documented in [containers-policy.json(5)](https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md).
## SEE ALSO
skopeo-login(1), docker-login(1), containers-auth.json(5), containers-storage.conf(5), containers-policy.json(5), containers-transports(5)

go.mod (140 changed lines)
View File

@@ -3,134 +3,106 @@ module github.com/containers/skopeo
go 1.17
require (
github.com/containers/common v0.51.4
github.com/containers/image/v5 v5.24.3
github.com/containers/ocicrypt v1.1.10
github.com/containers/storage v1.45.3
github.com/docker/distribution v2.8.1+incompatible
github.com/containers/common v0.49.1
github.com/containers/image/v5 v5.22.1
github.com/containers/ocicrypt v1.1.5
github.com/containers/storage v1.42.0
github.com/docker/docker v20.10.17+incompatible
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0-rc2
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
github.com/opencontainers/image-tools v1.0.0-rc3
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.6.1
github.com/spf13/cobra v1.5.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.1
github.com/stretchr/testify v1.8.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
golang.org/x/term v0.17.0
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
gopkg.in/yaml.v2 v2.4.0
)
require (
github.com/BurntSushi/toml v1.2.1 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/Microsoft/hcsshim v0.9.6 // indirect
github.com/BurntSushi/toml v1.2.0 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Microsoft/hcsshim v0.9.3 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
github.com/containerd/cgroups v1.0.4 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/coreos/go-oidc/v3 v3.5.0 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/containerd/cgroups v1.0.3 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/docker v20.10.23+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker-credential-helpers v0.6.4 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
github.com/go-openapi/errors v0.20.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/loads v0.21.2 // indirect
github.com/go-openapi/runtime v0.24.1 // indirect
github.com/go-openapi/spec v0.20.7 // indirect
github.com/go-openapi/strfmt v0.21.3 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-openapi/validate v0.22.0 // indirect
github.com/go-playground/locales v0.14.0 // indirect
github.com/go-playground/universal-translator v0.18.0 // indirect
github.com/go-playground/validator/v10 v10.11.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-containerregistry v0.13.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-containerregistry v0.10.0 // indirect
github.com/google/go-intervals v0.0.2 // indirect
github.com/google/trillian v1.5.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.2 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.15.15 // indirect
github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/klauspost/compress v1.15.9 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/kr/pretty v0.2.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/letsencrypt/boulder v0.0.0-20230130200452-c091e64aa391 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/opencontainers/runc v1.1.4 // indirect
github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb // indirect
github.com/opencontainers/selinux v1.10.2 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/opencontainers/runc v1.1.3 // indirect
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
github.com/opencontainers/selinux v1.10.1 // indirect
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/proglottis/gpgme v0.1.3 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/rogpeppe/go-internal v1.8.0 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/russross/blackfriday v2.0.0+incompatible // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sigstore/fulcio v1.0.0 // indirect
github.com/sigstore/rekor v1.0.1 // indirect
github.com/sigstore/sigstore v1.5.2 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
github.com/sylabs/sif/v2 v2.9.0 // indirect
github.com/sylabs/sif/v2 v2.7.1 // indirect
github.com/tchap/go-patricia v2.3.0+incompatible // indirect
github.com/theupdateframework/go-tuf v0.5.2-0.20221207161717-9cb61d6e65f5 // indirect
github.com/theupdateframework/go-tuf v0.3.1 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/ulikunitz/xz v0.5.11 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/vbatts/tar-split v0.11.2 // indirect
github.com/vbauerster/mpb/v7 v7.5.3 // indirect
github.com/vbauerster/mpb/v7 v7.4.2 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
go.mongodb.org/mongo-driver v1.11.1 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/oauth2 v0.7.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/tools v0.6.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/grpc v1.56.3 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/otel/trace v1.3.0 // indirect
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 // indirect
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
golang.org/x/text v0.3.7 // indirect
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect
google.golang.org/grpc v1.47.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum (1579 changed lines)

File diff suppressed because it is too large

View File

@@ -34,7 +34,7 @@ if [[ "$SKOPEO_CONTAINER_TESTS" == "0" ]] && [[ "$CI" != "true" ]]; then
echo " the Makefile targets WITHOUT the '-local' suffix."
echo "***************************************************************"
) > /dev/stderr
sleep 5
sleep 5s
fi
echo

View File

@@ -5,36 +5,16 @@ set -e
# not all storage drivers are supported in a container
# environment. Detect this and setup storage when
# running in a container.
#
# Paradoxically (FIXME: clean this up), SKOPEO_CONTAINER_TESTS is set
# both inside a container and without a container (in a CI VM); it actually means
# "it is safe to desctructively modify the system for tests".
#
# On a CI VM, we can just use Podman as it is already configured; the changes below,
# to use VFS, are necessary only inside a container, because overlay-inside-overlay
# does not work. So, make these changes conditional on both
# SKOPEO_CONTAINER_TESTS (for acceptability to do destructive modification) and !CI
# (for necessity to adjust for in-container operation)
if ((SKOPEO_CONTAINER_TESTS)) && [[ "$CI" != true ]]; then
if [[ -r /etc/containers/storage.conf ]]; then
echo "MODIFYING existing storage.conf"
sed -i \
-e 's/^driver\s*=.*/driver = "vfs"/' \
-e 's/^mountopt/#mountopt/' \
/etc/containers/storage.conf
else
echo "CREATING NEW storage.conf"
cat >> /etc/containers/storage.conf << EOF
if ((SKOPEO_CONTAINER_TESTS)) && [[ -r /etc/containers/storage.conf ]]; then
sed -i \
-e 's/^driver\s*=.*/driver = "vfs"/' \
-e 's/^mountopt/#mountopt/' \
/etc/containers/storage.conf
elif ((SKOPEO_CONTAINER_TESTS)); then
cat >> /etc/containers/storage.conf << EOF
[storage]
driver = "vfs"
runroot = "/run/containers/storage"
graphroot = "/var/lib/containers/storage"
EOF
fi
# The logic of finding the relevant storage.conf file is convoluted
# and in effect differs between Skopeo and Podman, at least in some versions;
# explicitly point at the file we want to use to hopefully avoid that.
export CONTAINERS_STORAGE_CONF=/etc/containers/storage.conf
fi
# Build skopeo, install into /usr/bin

View File

@@ -49,25 +49,25 @@ func (s *SkopeoSuite) TearDownSuite(c *check.C) {
//func skopeoCmd()
func (s *SkopeoSuite) TestVersion(c *check.C) {
assertSkopeoSucceeds(c, fmt.Sprintf(".*%s version %s.*", skopeoBinary, version.Version),
"--version")
wanted := fmt.Sprintf(".*%s version %s.*", skopeoBinary, version.Version)
assertSkopeoSucceeds(c, wanted, "--version")
}
func (s *SkopeoSuite) TestCanAuthToPrivateRegistryV2WithoutDockerCfg(c *check.C) {
assertSkopeoFails(c, ".*manifest unknown.*",
"--tls-verify=false", "inspect", "--creds="+s.regV2WithAuth.username+":"+s.regV2WithAuth.password, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
wanted := ".*manifest unknown: manifest unknown.*"
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", "--creds="+s.regV2WithAuth.username+":"+s.regV2WithAuth.password, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
}
func (s *SkopeoSuite) TestNeedAuthToPrivateRegistryV2WithoutDockerCfg(c *check.C) {
assertSkopeoFails(c, ".*authentication required.*",
"--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
wanted := ".*unauthorized: authentication required.*"
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
}
func (s *SkopeoSuite) TestCertDirInsteadOfCertPath(c *check.C) {
assertSkopeoFails(c, ".*unknown flag: --cert-path.*",
"--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-path=/")
assertSkopeoFails(c, ".*authentication required.*",
"--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-dir=/etc/docker/certs.d/")
wanted := ".*unknown flag: --cert-path.*"
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-path=/")
wanted = ".*unauthorized: authentication required.*"
assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-dir=/etc/docker/certs.d/")
}
// TODO(runcom): as soon as we can push to registries ensure you can inspect here
@@ -75,8 +75,10 @@ func (s *SkopeoSuite) TestCertDirInsteadOfCertPath(c *check.C) {
func (s *SkopeoSuite) TestNoNeedAuthToPrivateRegistryV2ImageNotFound(c *check.C) {
out, err := exec.Command(skopeoBinary, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2.url)).CombinedOutput()
c.Assert(err, check.NotNil, check.Commentf(string(out)))
c.Assert(string(out), check.Matches, "(?s).*manifest unknown.*") // (?s) : '.' will also match newlines
c.Assert(string(out), check.Not(check.Matches), "(?s).*unauthorized: authentication required.*") // (?s) : '.' will also match newlines
wanted := ".*manifest unknown.*"
c.Assert(string(out), check.Matches, "(?s)"+wanted) // (?s) : '.' will also match newlines
wanted = ".*unauthorized: authentication required.*"
c.Assert(string(out), check.Not(check.Matches), "(?s)"+wanted) // (?s) : '.' will also match newlines
}
func (s *SkopeoSuite) TestInspectFailsWhenReferenceIsInvalid(c *check.C) {
@@ -84,28 +86,28 @@ func (s *SkopeoSuite) TestInspectFailsWhenReferenceIsInvalid(c *check.C) {
}
func (s *SkopeoSuite) TestLoginLogout(c *check.C) {
assertSkopeoSucceeds(c, "^Login Succeeded!\n$",
"login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
wanted := "^Login Succeeded!\n$"
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
// test --get-login returns username
assertSkopeoSucceeds(c, fmt.Sprintf("^%s\n$", s.regV2WithAuth.username),
"login", "--tls-verify=false", "--get-login", s.regV2WithAuth.url)
wanted = fmt.Sprintf("^%s\n$", s.regV2WithAuth.username)
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--get-login", s.regV2WithAuth.url)
// test logout
assertSkopeoSucceeds(c, fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url),
"logout", s.regV2WithAuth.url)
wanted = fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url)
assertSkopeoSucceeds(c, wanted, "logout", s.regV2WithAuth.url)
}
func (s *SkopeoSuite) TestCopyWithLocalAuth(c *check.C) {
assertSkopeoSucceeds(c, "^Login Succeeded!\n$",
"login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
wanted := "^Login Succeeded!\n$"
assertSkopeoSucceeds(c, wanted, "login", "--tls-verify=false", "--username="+s.regV2WithAuth.username, "--password="+s.regV2WithAuth.password, s.regV2WithAuth.url)
// copy to private registry using local authentication
imageName := fmt.Sprintf("docker://%s/busybox:mine", s.regV2WithAuth.url)
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", testFQIN+":latest", imageName)
// inspect from private registry
assertSkopeoSucceeds(c, "", "inspect", "--tls-verify=false", imageName)
// logout from the registry
assertSkopeoSucceeds(c, fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url),
"logout", s.regV2WithAuth.url)
wanted = fmt.Sprintf("^Removed login credentials for %s\n$", s.regV2WithAuth.url)
assertSkopeoSucceeds(c, wanted, "logout", s.regV2WithAuth.url)
// inspect from private registry should fail after logout
assertSkopeoFails(c, ".*authentication required.*",
"inspect", "--tls-verify=false", imageName)
wanted = ".*unauthorized: authentication required.*"
assertSkopeoFails(c, wanted, "inspect", "--tls-verify=false", imageName)
}

View File

@@ -31,8 +31,7 @@ const (
v2DockerRegistryURL = "localhost:5555" // Update also policy.json
v2s1DockerRegistryURL = "localhost:5556"
knownWindowsOnlyImage = "docker://mcr.microsoft.com/windows/nanoserver:1909"
knownListImageRepo = "docker://registry.fedoraproject.org/fedora-minimal"
knownListImage = knownListImageRepo + ":38"
knownListImage = "docker://registry.fedoraproject.org/fedora-minimal" // could have either ":latest" or "@sha256:..." appended
)
type CopySuite struct {
@@ -197,8 +196,8 @@ func (s *CopySuite) TestCopyWithManifestListDigest(c *check.C) {
manifestDigest, err := manifest.Digest([]byte(m))
c.Assert(err, check.IsNil)
digest := manifestDigest.String()
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImageRepo+"@"+digest, "dir:"+dir2)
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImage+"@"+digest, "dir:"+dir2)
assertSkopeoSucceeds(c, "", "copy", "dir:"+dir1, "oci:"+oci1)
assertSkopeoSucceeds(c, "", "copy", "dir:"+dir2, "oci:"+oci2)
out := combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
@@ -225,9 +224,9 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigest(c *check.C) {
manifestDigest, err := manifest.Digest([]byte(m))
c.Assert(err, check.IsNil)
digest := manifestDigest.String()
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test@"+digest, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "dir:"+dir2)
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir2)
runDecompressDirs(c, "", dir1, dir2)
assertDirImagesAreEqual(c, dir1, dir2)
}
@@ -241,9 +240,9 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArches(c *check
manifestDigest, err := manifest.Digest([]byte(m))
c.Assert(err, check.IsNil)
digest := manifestDigest.String()
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test@"+digest, "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", knownListImageRepo+"@"+digest, "dir:"+dir2)
assertSkopeoSucceeds(c, "", "copy", knownListImage+"@"+digest, "dir:"+dir2)
runDecompressDirs(c, "", dir1, dir2)
assertDirImagesAreEqual(c, dir1, dir2)
}
@@ -257,8 +256,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseLi
digest := manifestDigest.String()
_, err = manifest.ListFromBlob([]byte(m), manifest.GuessMIMEType([]byte(m)))
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
i2 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
@@ -281,8 +280,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUses
c.Assert(err, check.IsNil)
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+arm64Instance.String(), "containers-storage:"+storage+"test@"+arm64Instance.String())
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+arm64Instance.String(), "containers-storage:"+storage+"test@"+arm64Instance.String())
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
var image1 imgspecv1.Image
err = json.Unmarshal([]byte(i1), &image1)
@@ -315,8 +314,8 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUse
c.Assert(err, check.IsNil)
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String())
var image1 imgspecv1.Image
err = json.Unmarshal([]byte(i1), &image1)
@@ -349,9 +348,9 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUses
c.Assert(err, check.IsNil)
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+amd64Instance.String(), "containers-storage:"+storage+"test@"+amd64Instance.String())
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+amd64Instance.String())
var image1 imgspecv1.Image
@@ -384,7 +383,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDig
arm64Instance, err := list.ChooseInstance(&types.SystemContext{ArchitectureChoice: "arm64"})
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--override-arch=amd64", "copy", knownListImage, "containers-storage:"+storage+"test:latest")
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImageRepo+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoSucceeds(c, "", "--override-arch=arm64", "copy", knownListImage+"@"+digest, "containers-storage:"+storage+"test@"+digest)
assertSkopeoFails(c, `.*reading manifest for image instance.*does not exist.*`, "--override-arch=amd64", "inspect", "--config", "containers-storage:"+storage+"test@"+digest)
i1 := combinedOutputOfCommand(c, skopeoBinary, "--override-arch=arm64", "inspect", "--config", "containers-storage:"+storage+"test:latest")
var image1 imgspecv1.Image
@@ -448,7 +447,7 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// "pull": docker: → dir:
assertSkopeoSucceeds(c, "", "copy", "docker://registry.k8s.io/pause", "dir:"+dir1)
assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause", "dir:"+dir1)
// "push": dir: → docker(v2s2):
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, ourRegistry+"pause:unsigned")
// The result of pushing and pulling is an unmodified image.
@@ -462,14 +461,14 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
ociDest := "pause-latest-image"
ociImgName := "pause"
defer os.RemoveAll(ociDest)
assertSkopeoSucceeds(c, "", "copy", "docker://registry.k8s.io/pause:latest", "oci:"+ociDest+":"+ociImgName)
assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest+":"+ociImgName)
_, err := os.Stat(ociDest)
c.Assert(err, check.IsNil)
// docker v2s2 -> OCI image layout without image name
ociDest = "pause-latest-noimage"
defer os.RemoveAll(ociDest)
assertSkopeoSucceeds(c, "", "copy", "docker://registry.k8s.io/pause:latest", "oci:"+ociDest)
assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest)
_, err = os.Stat(ociDest)
c.Assert(err, check.IsNil)
}
@@ -1037,8 +1036,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
// To be extra clear about the semantics, verify that the signedPrefix (primary) location never exists
// and only the remapped prefix (mirror) is accessed.
assertSkopeoFails(c, ".*initializing source docker://localhost:5006/myns/mirroring-primary:remapped:.*manifest unknown.*",
"--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:remapped", dirDest)
assertSkopeoFails(c, ".*initializing source docker://localhost:5006/myns/mirroring-primary:remapped:.*manifest unknown: manifest unknown.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:remapped", dirDest)
}
func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {

View File

@@ -11,7 +11,7 @@ import (
"strings"
"time"
"github.com/containers/storage/pkg/homedir"
"github.com/docker/docker/pkg/homedir"
"gopkg.in/check.v1"
)

View File

@@ -15,15 +15,11 @@ TestRunShell is not really a test; it is a convenient way to use the registry se
in openshift.go and CopySuite to get an interactive environment for experimentation.
To use it, run:
sudo make shell
to start a container, then within the container:
SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'
An example of what can be done within the container:
cd ..; make bin/skopeo PREFIX=/usr install
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://quay.io/libpod/busybox:latest atomic:localhost:5000/myns/personal:personal
oc get istag personal:personal -o json

View File

@@ -20,9 +20,6 @@ import (
// This image is known to be x86_64 only right now
const knownNotManifestListedImage_x8664 = "docker://quay.io/coreos/11bot"
// knownNotExtantImage would be very surprising if it did exist
const knownNotExtantImage = "docker://quay.io/centos/centos:opensusewindowsubuntu"
const expectedProxySemverMajor = "0.2"
// request is copied from proxy.go
@@ -243,29 +240,6 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
return fmt.Errorf("OpenImage return value is %T", v)
}
imgid := uint32(imgidv)
if imgid == 0 {
return fmt.Errorf("got zero from expected image")
}
// Also verify the optional path
v, err = p.callNoFd("OpenImageOptional", []interface{}{knownNotManifestListedImage_x8664})
if err != nil {
return err
}
imgidv, ok = v.(float64)
if !ok {
return fmt.Errorf("OpenImageOptional return value is %T", v)
}
imgid2 := uint32(imgidv)
if imgid2 == 0 {
return fmt.Errorf("got zero from expected image")
}
_, err = p.callNoFd("CloseImage", []interface{}{imgid2})
if err != nil {
return err
}
_, manifestBytes, err := p.callReadAllBytes("GetManifest", []interface{}{imgid})
if err != nil {
@@ -318,23 +292,6 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
return nil
}
func runTestOpenImageOptionalNotFound(p *proxy, img string) error {
v, err := p.callNoFd("OpenImageOptional", []interface{}{img})
if err != nil {
return err
}
imgidv, ok := v.(float64)
if !ok {
return fmt.Errorf("OpenImageOptional return value is %T", v)
}
imgid := uint32(imgidv)
if imgid != 0 {
return fmt.Errorf("Unexpected optional image id %v", imgid)
}
return nil
}
func (s *ProxySuite) TestProxy(c *check.C) {
p, err := newProxy()
c.Assert(err, check.IsNil)
@@ -350,10 +307,4 @@ func (s *ProxySuite) TestProxy(c *check.C) {
err = fmt.Errorf("Testing image %s: %v", knownListImage, err)
}
c.Assert(err, check.IsNil)
err = runTestOpenImageOptionalNotFound(p, knownNotExtantImage)
if err != nil {
err = fmt.Errorf("Testing optional image %s: %v", knownNotExtantImage, err)
}
c.Assert(err, check.IsNil)
}

View File

@@ -116,7 +116,6 @@ func (t *testRegistryV2) Ping() error {
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
return fmt.Errorf("registry ping replied with an unexpected status code %d", resp.StatusCode)
}

View File

@@ -22,15 +22,15 @@ const (
// A repository with a path with multiple components in it which
// contains multiple tags, preferably with some tags pointing to
// manifest lists, and with some tags that don't.
pullableRepo = "registry.k8s.io/coredns/coredns"
pullableRepo = "k8s.gcr.io/coredns/coredns"
// A tagged image in the repository that we can inspect and copy.
pullableTaggedImage = "registry.k8s.io/coredns/coredns:v1.6.6"
pullableTaggedImage = "k8s.gcr.io/coredns/coredns:v1.6.6"
// A tagged manifest list in the repository that we can inspect and copy.
pullableTaggedManifestList = "registry.k8s.io/coredns/coredns:v1.8.0"
pullableTaggedManifestList = "k8s.gcr.io/coredns/coredns:v1.8.0"
// A repository containing multiple tags, some of which are for
// manifest lists, and which includes a "latest" tag. We specify the
// name here without a tag.
pullableRepoWithLatestTag = "registry.k8s.io/pause"
pullableRepoWithLatestTag = "k8s.gcr.io/pause"
)
func init() {
@@ -305,7 +305,7 @@ func (s *SyncSuite) TestYamlRegex2Dir(c *check.C) {
dir1 := path.Join(tmpDir, "dir1")
yamlConfig := `
registry.k8s.io:
k8s.gcr.io:
images-by-tag-regex:
pause: ^[12]\.0$ # regex string test
`
@@ -325,7 +325,7 @@ func (s *SyncSuite) TestYamlDigest2Dir(c *check.C) {
dir1 := path.Join(tmpDir, "dir1")
yamlConfig := `
registry.k8s.io:
k8s.gcr.io:
images:
pause:
- sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
@@ -342,7 +342,7 @@ func (s *SyncSuite) TestYaml2Dir(c *check.C) {
dir1 := path.Join(tmpDir, "dir1")
yamlConfig := `
registry.k8s.io:
k8s.gcr.io:
images:
coredns/coredns:
- v1.8.0
@@ -534,7 +534,7 @@ func (s *SyncSuite) TestFailsNoSourceImages(c *check.C) {
assertSkopeoFails(c, ".*No images to sync found in .*",
"sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", tmpDir, v2DockerRegistryURL)
assertSkopeoFails(c, ".*Error determining repository tags for repo docker.io/library/hopefully_no_images_will_ever_be_called_like_this: fetching tags list: requested access to the resource is denied.*",
assertSkopeoFails(c, ".*No images to sync found in .*",
"sync", "--scoped", "--dest-tls-verify=false", "--src", "docker", "--dest", "docker", "hopefully_no_images_will_ever_be_called_like_this", v2DockerRegistryURL)
}
@@ -544,11 +544,11 @@ func (s *SyncSuite) TestFailsWithDockerSourceNoRegistry(c *check.C) {
tmpDir := c.MkDir()
//untagged
assertSkopeoFails(c, ".*StatusCode: 404.*",
assertSkopeoFails(c, ".*invalid status code from registry 404.*",
"sync", "--scoped", "--src", "docker", "--dest", "dir", regURL, tmpDir)
//tagged
assertSkopeoFails(c, ".*StatusCode: 404.*",
assertSkopeoFails(c, ".*invalid status code from registry 404.*",
"sync", "--scoped", "--src", "docker", "--dest", "dir", regURL+":thetag", tmpDir)
}
@@ -557,11 +557,11 @@ func (s *SyncSuite) TestFailsWithDockerSourceUnauthorized(c *check.C) {
tmpDir := c.MkDir()
//untagged
assertSkopeoFails(c, ".*requested access to the resource is denied.*",
assertSkopeoFails(c, ".*Registry disallows tag list retrieval.*",
"sync", "--scoped", "--src", "docker", "--dest", "dir", repo, tmpDir)
//tagged
assertSkopeoFails(c, ".*requested access to the resource is denied.*",
assertSkopeoFails(c, ".*unauthorized: authentication required.*",
"sync", "--scoped", "--src", "docker", "--dest", "dir", repo+":thetag", tmpDir)
}
@@ -570,7 +570,7 @@ func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) {
tmpDir := c.MkDir()
//untagged
assertSkopeoFails(c, ".*repository name not known to registry.*",
assertSkopeoFails(c, ".*invalid status code from registry 404.*",
"sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", repo, tmpDir)
//tagged

View File

@@ -19,7 +19,7 @@ const decompressDirsBinary = "./decompress-dirs.sh"
const testFQIN = "docker://quay.io/libpod/busybox" // tag left off on purpose, some tests need to add a special one
const testFQIN64 = "docker://quay.io/libpod/busybox:amd64"
const testFQINMultiLayer = "docker://quay.io/libpod/alpine_nginx:latest" // multi-layer
const testFQINMultiLayer = "docker://quay.io/libpod/alpine_nginx:master" // multi-layer
// consumeAndLogOutputStream takes (f, err) from an exec.*Pipe(), and causes all output to it to be logged to c.
func consumeAndLogOutputStream(c *check.C, id string, f io.ReadCloser, err error) {

View File

@@ -16,29 +16,4 @@ function setup() {
expect_output --substring "skopeo version [0-9.]+"
}
@test "skopeo release isn't a development version" {
[[ "${RELEASE_TESTING:-false}" == "true" ]] || \
skip "Release testing may be enabled by setting \$RELEASE_TESTING = 'true'."
run_skopeo --version
# expect_output() doesn't support negative matching
if [[ "$output" =~ "dev" ]]; then
# This is a multi-line message, which may in turn contain multi-line
# output, so let's format it ourselves, readably
local -a output_split
readarray -t output_split <<<"$output"
printf "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n" >&2
printf "#| FAIL: $BATS_TEST_NAME\n" >&2
printf "#| unexpected: 'dev'\n" >&2
printf "#| actual: '%s'\n" "${output_split[0]}" >&2
local line
for line in "${output_split[@]:1}"; do
printf "#| > '%s'\n" "$line" >&2
done
printf "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n" >&2
false
fi
}
# vim: filetype=sh

View File

@@ -95,11 +95,10 @@ END_EXPECT
# is created by the make-noarch-manifest script in this directory.
img=docker://quay.io/libpod/notmyarch:20210121
# Get our host golang arch (what we're running on, according to golang).
# This assumes that skopeo arch matches host arch (which it always should).
# Buildah is used here because it depends less on the exact system config
# than podman - and all we're really after is the golang-flavored arch name.
arch=$(go env GOARCH)
# Get our host arch (what we're running on). This assumes that skopeo
# arch matches podman; it also assumes running podman >= April 2020
# (prior to that, the format keys were lower-case).
arch=$(podman info --format '{{.Host.Arch}}')
# By default, 'inspect' tries to match our host os+arch. This should fail.
run_skopeo 1 inspect $img

View File

@@ -50,7 +50,7 @@ function setup() {
local dir=$TESTDIR/dir
run_skopeo copy --dest-compress-format=zstd $remote_image oci:$dir:latest
run_skopeo copy --dest-compress --dest-compress-format=zstd $remote_image oci:$dir:latest
# zstd magic number
local magic=$(printf "\x28\xb5\x2f\xfd")
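For context, a sketch of how such a magic constant can be used to verify the result (illustrative, not the original test body; assumes an OCI layout was written to `$dir` as above):
```sh
# Look for at least one blob whose first four bytes are the zstd frame magic.
found=
for blob in $dir/blobs/sha256/*; do
    if head -c 4 "$blob" | cmp -s - <(printf '\x28\xb5\x2f\xfd'); then
        found=1
    fi
done
[ -n "$found" ]
```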

View File

@@ -8,40 +8,38 @@ load helpers
function setup() {
standard_setup
# Remove old/stale cred file
_cred_dir=$TESTDIR/credentials
export XDG_RUNTIME_DIR=$_cred_dir
mkdir -p $_cred_dir/containers
rm -f $_cred_dir/containers/auth.json
# Start authenticated registry with random password
testuser=testuser
testpassword=$(random_string 15)
start_registry --testuser=$testuser --testpassword=$testpassword --enable-delete=true reg
_cred_dir=$TESTDIR/credentials
# It is important to change XDG_RUNTIME_DIR only after we start the registry, otherwise it affects the path of $XDG_RUNTIME_DIR/netns maintained by Podman,
# making it impossible to clean up after ourselves.
export XDG_RUNTIME_DIR=$_cred_dir
mkdir -p $_cred_dir/containers
# Remove old/stale cred file
rm -f $_cred_dir/containers/auth.json
}
@test "auth: credentials on command line" {
# No creds
run_skopeo 1 inspect --tls-verify=false docker://localhost:5000/nonesuch
expect_output --substring "authentication required"
expect_output --substring "unauthorized: authentication required"
# Wrong user
run_skopeo 1 inspect --tls-verify=false --creds=baduser:badpassword \
docker://localhost:5000/nonesuch
expect_output --substring "authentication required"
expect_output --substring "unauthorized: authentication required"
# Wrong password
run_skopeo 1 inspect --tls-verify=false --creds=$testuser:badpassword \
docker://localhost:5000/nonesuch
expect_output --substring "authentication required"
expect_output --substring "unauthorized: authentication required"
# Correct creds, but no such image
run_skopeo 1 inspect --tls-verify=false --creds=$testuser:$testpassword \
docker://localhost:5000/nonesuch
expect_output --substring "manifest unknown"
expect_output --substring "manifest unknown: manifest unknown"
# These should pass
run_skopeo copy --dest-tls-verify=false --dcreds=$testuser:$testpassword \
@@ -66,7 +64,7 @@ function setup() {
podman logout localhost:5000
run_skopeo 1 inspect --tls-verify=false docker://localhost:5000/busybox:mine
expect_output --substring "authentication required"
expect_output --substring "unauthorized: authentication required"
}
@test "auth: copy with --src-creds and --dest-creds" {
@@ -96,7 +94,7 @@ function setup() {
# inspect without authfile: should fail
run_skopeo 1 inspect --tls-verify=false docker://localhost:5000/busybox:mine
expect_output --substring "authentication required"
expect_output --substring "unauthorized: authentication required"
# inspect with authfile: should work
run_skopeo inspect --tls-verify=false --authfile $TESTDIR/test.auth docker://localhost:5000/busybox:mine

View File

@@ -10,7 +10,7 @@ SKOPEO_BINARY=${SKOPEO_BINARY:-${TEST_SOURCE_DIR}/../bin/skopeo}
SKOPEO_TIMEOUT=${SKOPEO_TIMEOUT:-300}
# Default image to run as a local registry
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-quay.io/libpod/registry:2.8.2}
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-quay.io/libpod/registry:2}
###############################################################################
# BEGIN setup/teardown

View File

@@ -21,9 +21,7 @@ type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}
// Unmarshal decodes the contents of data in TOML format into a pointer v.
//
// See [Decoder] for a description of the decoding process.
// Unmarshal decodes the contents of `data` in TOML format into a pointer `v`.
func Unmarshal(data []byte, v interface{}) error {
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
return err
@@ -31,12 +29,13 @@ func Unmarshal(data []byte, v interface{}) error {
// Decode the TOML data in to the pointer v.
//
// See [Decoder] for a description of the decoding process.
// See the documentation on Decoder for a description of the decoding process.
func Decode(data string, v interface{}) (MetaData, error) {
return NewDecoder(strings.NewReader(data)).Decode(v)
}
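
For orientation, a minimal sketch of basic decoding with this package, using only the `Decode` and `Unmarshal` entry points shown above; the struct and TOML snippet are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type config struct {
	Name string
	Port int
}

func main() {
	doc := "name = \"example\"\nport = 5000"

	// Decode from a string; the returned MetaData is ignored here.
	var c config
	if _, err := toml.Decode(doc, &c); err != nil {
		panic(err)
	}

	// Unmarshal is the equivalent entry point for a []byte.
	var c2 config
	if err := toml.Unmarshal([]byte(doc), &c2); err != nil {
		panic(err)
	}
	fmt.Println(c, c2)
}
```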
// DecodeFile reads the contents of a file and decodes it with [Decode].
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at path and decode it for you.
func DecodeFile(path string, v interface{}) (MetaData, error) {
fp, err := os.Open(path)
if err != nil {
@@ -49,7 +48,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
// Primitive is a TOML value that hasn't been decoded into a Go value.
//
// This type can be used for any value, which will cause decoding to be delayed.
// You can use [PrimitiveDecode] to "manually" decode these values.
// You can use the PrimitiveDecode() function to "manually" decode these values.
//
// NOTE: The underlying representation of a `Primitive` value is subject to
// change. Do not rely on it.
@@ -71,15 +70,15 @@ const (
// Decoder decodes TOML data.
//
// TOML tables correspond to Go structs or maps; they can be used
// interchangeably, but structs offer better type safety.
// TOML tables correspond to Go structs or maps (dealer's choice; they can be
// used interchangeably).
//
// TOML table arrays correspond to either a slice of structs or a slice of maps.
//
// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the
// local timezone.
// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
// in the local timezone.
//
// [time.Duration] types are treated as nanoseconds if the TOML value is an
// time.Duration types are treated as nanoseconds if the TOML value is an
// integer, or they're parsed with time.ParseDuration() if they're strings.
//
// All other TOML types (float, string, int, bool and array) correspond to the
@@ -91,7 +90,7 @@ const (
// UnmarshalText method. See the Unmarshaler example for a demonstration with
// email addresses.
//
// ### Key mapping
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go struct.
// The special `toml` struct tag can be used to map TOML keys to struct fields
@@ -169,16 +168,17 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
return md, md.unify(p.mapping, rv)
}
// PrimitiveDecode is just like the other Decode* functions, except it decodes a
// TOML value that has already been parsed. Valid primitive values can *only* be
// obtained from values filled by the decoder functions, including this method.
// (i.e., v may contain more [Primitive] values.)
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by the
// Decode* functions with one exception: keys returned by the Undecoded method
// will only reflect keys that were decoded. Namely, any keys hidden behind a
// Primitive will be considered undecoded. Executing this method will update the
// undecoded keys in the meta data. (See the example.)
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
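
A rough sketch of delayed decoding with `Primitive` and `PrimitiveDecode`, per the description above; the `[extra]` table and its final Go shape are illustrative:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type document struct {
	Title string
	Extra toml.Primitive // left undecoded until we know its shape
}

func main() {
	doc := "title = \"notes\"\n[extra]\nauthor = \"someone\""
	var d document
	md, err := toml.Decode(doc, &d)
	if err != nil {
		panic(err)
	}
	// Decide only now what Go type the [extra] table should become.
	var extra map[string]string
	if err := md.PrimitiveDecode(d.Extra, &extra); err != nil {
		panic(err)
	}
	fmt.Println(d.Title, extra["author"])
}
```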

View File

@@ -7,8 +7,8 @@ import (
"io/fs"
)
// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
// [Decode].
// DecodeFS is just like Decode, except it will automatically read the contents
// of the file at `path` from a fs.FS instance.
func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
fp, err := fsys.Open(path)
if err != nil {

View File

@@ -1,11 +1,13 @@
// Package toml implements decoding and encoding of TOML files.
//
// This package supports TOML v1.0.0, as specified at https://toml.io
//
// There is also support for delaying decoding with the Primitive type, and
// querying the set of keys in a TOML document with the MetaData type.
//
// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
// and can be used to verify if a TOML document is valid. It can also be used to
// print the type of each key.
/*
Package toml implements decoding and encoding of TOML files.
This package supports TOML v1.0.0, as listed on https://toml.io
There is also support for delaying decoding with the Primitive type, and
querying the set of keys in a TOML document with the MetaData type.
The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
and can be used to verify if a TOML document is valid. It can also be used to
print the type of each key.
*/
package toml

View File

@@ -79,12 +79,12 @@ type Marshaler interface {
// Encoder encodes a Go to a TOML document.
//
// The mapping between Go values and TOML values should be precisely the same as
// for [Decode].
// for the Decode* functions.
//
// time.Time is encoded as a RFC 3339 string, and time.Duration as its string
// representation.
//
// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to
// The toml.Marshaler and encoder.TextMarshaler interfaces are supported to
// encode the value as custom TOML.
//
// If you want to write arbitrary binary data then you will need to use
@@ -130,7 +130,7 @@ func NewEncoder(w io.Writer) *Encoder {
}
}
// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
// Encode writes a TOML representation of the Go value to the Encoder's writer.
//
// An error is returned if the value given cannot be encoded to a valid TOML
// document.
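
As a rough usage sketch of the encoder (mirroring the decoding examples earlier; the `service` struct is invented):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/BurntSushi/toml"
)

type service struct {
	Name string `toml:"name"`
	Port int    `toml:"port"`
}

func main() {
	var buf bytes.Buffer
	// Encode writes the TOML representation to the Encoder's writer.
	if err := toml.NewEncoder(&buf).Encode(service{Name: "registry", Port: 5000}); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}
```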
@@ -261,7 +261,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
enc.eElement(reflect.ValueOf(v))
return
}
encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n))
encPanic(errors.New(fmt.Sprintf("Unable to convert \"%s\" to neither int64 nor float64", n)))
}
switch rv.Kind() {
@@ -504,8 +504,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if opts.name != "" {
keyName = opts.name
}
if opts.omitempty && enc.isEmpty(fieldVal) {
if opts.omitempty && isEmpty(fieldVal) {
continue
}
if opts.omitzero && isZero(fieldVal) {
@@ -649,26 +648,12 @@ func isZero(rv reflect.Value) bool {
return false
}
func (enc *Encoder) isEmpty(rv reflect.Value) bool {
func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
case reflect.Struct:
if rv.Type().Comparable() {
return reflect.Zero(rv.Type()).Interface() == rv.Interface()
}
// Need to also check if all the fields are empty, otherwise something
// like this with uncomparable types will always return true:
//
// type a struct{ field b }
// type b struct{ s []string }
// s := a{field: b{s: []string{"AAA"}}}
for i := 0; i < rv.NumField(); i++ {
if !enc.isEmpty(rv.Field(i)) {
return false
}
}
return true
return reflect.Zero(rv.Type()).Interface() == rv.Interface()
case reflect.Bool:
return !rv.Bool()
}
@@ -683,15 +668,16 @@ func (enc *Encoder) newline() {
// Write a key/value pair:
//
// key = <any value>
// key = <any value>
//
// This is also used for "k = v" in inline tables; so something like this will
// be written in three calls:
//
// ┌───────────────────┐
// │ ┌───┐ ┌────┐│
// v v v v vv
// key = {k = 1, k2 = 2}
// ┌────────────────────┐
// │ ┌───┐ ┌────┐│
// v v v v vv
// key = {k = v, k2 = v2}
//
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
if len(key) == 0 {
encPanic(errNoKey)

View File

@@ -5,60 +5,57 @@ import (
"strings"
)
// ParseError is returned when there is an error parsing the TOML syntax such as
// invalid syntax, duplicate keys, etc.
// ParseError is returned when there is an error parsing the TOML syntax.
//
// For example invalid syntax, duplicate keys, etc.
//
// In addition to the error message itself, you can also print detailed location
// information with context by using [ErrorWithPosition]:
// information with context by using ErrorWithPosition():
//
// toml: error: Key 'fruit' was already created and cannot be used as an array.
// toml: error: Key 'fruit' was already created and cannot be used as an array.
//
// At line 4, column 2-7:
// At line 4, column 2-7:
//
// 2 | fruit = []
// 3 |
// 4 | [[fruit]] # Not allowed
// ^^^^^
// 2 | fruit = []
// 3 |
// 4 | [[fruit]] # Not allowed
// ^^^^^
//
// [ErrorWithUsage] can be used to print the above with some more detailed usage
// guidance:
// Furthermore, the ErrorWithUsage() can be used to print the above with some
// more detailed usage guidance:
//
// toml: error: newlines not allowed within inline tables
// toml: error: newlines not allowed within inline tables
//
// At line 1, column 18:
// At line 1, column 18:
//
// 1 | x = [{ key = 42 #
// ^
// 1 | x = [{ key = 42 #
// ^
//
// Error help:
// Error help:
//
// Inline tables must always be on a single line:
// Inline tables must always be on a single line:
//
// table = {key = 42, second = 43}
// table = {key = 42, second = 43}
//
// It is invalid to split them over multiple lines like so:
// It is invalid to split them over multiple lines like so:
//
// # INVALID
// table = {
// key = 42,
// second = 43
// }
// # INVALID
// table = {
// key = 42,
// second = 43
// }
//
// Use regular tables for this:
// Use regular tables for this:
//
// [table]
// key = 42
// second = 43
// [table]
// key = 42
// second = 43
type ParseError struct {
Message string // Short technical message.
Usage string // Longer message with usage guidance; may be blank.
Position Position // Position of the error
LastKey string // Last parsed key, may be blank.
// Line the error occurred.
//
// Deprecated: use [Position].
Line int
Line int // Line the error occurred. Deprecated: use Position.
err error
input string
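
A minimal sketch of surfacing the position information described above, assuming `Decode` returns a `toml.ParseError` for syntax errors; the invalid document is the inline-table example from the comment:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	_, err := toml.Decode("x = [{ key = 42 #", &v)
	var perr toml.ParseError
	if errors.As(err, &perr) {
		// Prints the "At line N, column M" context shown above.
		fmt.Println(perr.ErrorWithPosition())
	} else if err != nil {
		fmt.Println(err)
	}
}
```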
@@ -86,7 +83,7 @@ func (pe ParseError) Error() string {
// ErrorWithPosition() returns the error with detailed location context.
//
// See the documentation on [ParseError].
// See the documentation on ParseError.
func (pe ParseError) ErrorWithPosition() string {
if pe.input == "" { // Should never happen, but just in case.
return pe.Error()
@@ -127,7 +124,7 @@ func (pe ParseError) ErrorWithPosition() string {
// ErrorWithUsage() returns the error with detailed location context and usage
// guidance.
//
// See the documentation on [ParseError].
// See the documentation on ParseError.
func (pe ParseError) ErrorWithUsage() string {
m := pe.ErrorWithPosition()
if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {

View File

@@ -771,7 +771,7 @@ func lexRawString(lx *lexer) stateFn {
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning ''' has already been consumed and
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
r := lx.next()

View File

@@ -71,7 +71,7 @@ func (md *MetaData) Keys() []Key {
// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a [Primitive] value.
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
@@ -89,7 +89,7 @@ func (md *MetaData) Undecoded() []Key {
return undecoded
}
// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get
// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
// values of this type.
type Key []string

View File

@@ -1 +0,0 @@
* text=auto eol=lf

View File

@@ -1,10 +1 @@
.vscode/
*.exe
# testing
testdata
# go workspaces
go.work
go.work.sum

View File

@@ -1,144 +0,0 @@
run:
skip-dirs:
- pkg/etw/sample
linters:
enable:
# style
- containedctx # struct contains a context
- dupl # duplicate code
- errname # errors are named correctly
- goconst # strings that should be constants
- godot # comments end in a period
- misspell
- nolintlint # "//nolint" directives are properly explained
- revive # golint replacement
- stylecheck # golint replacement, less configurable than revive
- unconvert # unnecessary conversions
- wastedassign
# bugs, performance, unused, etc ...
- contextcheck # function uses a non-inherited context
- errorlint # errors not wrapped for 1.13
- exhaustive # check exhaustiveness of enum switch statements
- gofmt # files are gofmt'ed
- gosec # security
- nestif # deeply nested ifs
- nilerr # returns nil even with non-nil error
- prealloc # slices that can be pre-allocated
- structcheck # unused struct fields
- unparam # unused function params
issues:
exclude-rules:
# err is very often shadowed in nested scopes
- linters:
- govet
text: '^shadow: declaration of "err" shadows declaration'
# ignore long lines for skip autogen directives
- linters:
- revive
text: "^line-length-limit: "
source: "^//(go:generate|sys) "
# allow unjustified ignores of error checks in defer statements
- linters:
- nolintlint
text: "^directive `//nolint:errcheck` should provide explanation"
source: '^\s*defer '
# allow unjustified ignores of error lints for io.EOF
- linters:
- nolintlint
text: "^directive `//nolint:errorlint` should provide explanation"
source: '[=|!]= io.EOF'
linters-settings:
govet:
enable-all: true
disable:
# struct order is often for Win32 compat
# also, ignore pointer bytes/GC issues for now until performance becomes an issue
- fieldalignment
check-shadowing: true
nolintlint:
allow-leading-space: false
require-explanation: true
require-specific: true
revive:
# revive is more configurable than static check, so likely the preferred alternative to static-check
# (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997)
enable-all-rules:
true
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
rules:
# rules with required arguments
- name: argument-limit
disabled: true
- name: banned-characters
disabled: true
- name: cognitive-complexity
disabled: true
- name: cyclomatic
disabled: true
- name: file-header
disabled: true
- name: function-length
disabled: true
- name: function-result-limit
disabled: true
- name: max-public-structs
disabled: true
# generally annoying rules
- name: add-constant # complains about any and all strings and integers
disabled: true
- name: confusing-naming # we frequently use "Foo()" and "foo()" together
disabled: true
- name: flag-parameter # excessive, and a common idiom we use
disabled: true
# general config
- name: line-length-limit
arguments:
- 140
- name: var-naming
arguments:
- []
- - CID
- CRI
- CTRD
- DACL
- DLL
- DOS
- ETW
- FSCTL
- GCS
- GMSA
- HCS
- HV
- IO
- LCOW
- LDAP
- LPAC
- LTSC
- MMIO
- NT
- OCI
- PMEM
- PWSH
- RX
- SACl
- SID
- SMB
- TX
- VHD
- VHDX
- VMID
- VPCI
- WCOW
- WIM
stylecheck:
checks:
- "all"
- "-ST1003" # use revive's var naming

View File

@@ -13,60 +13,16 @@ Please see the LICENSE file for licensing information.
## Contributing
This project welcomes contributions and suggestions.
Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that
you have the right to, and actually do, grant us the rights to use your contribution.
For details, visit [Microsoft CLA](https://cla.microsoft.com).
This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA)
declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need to
provide a CLA and decorate the PR appropriately (e.g., label, comment).
Simply follow the instructions provided by the bot.
You will only need to do this once across all repos using our CLA.
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR
appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
Additionally, the pull request pipeline requires the following steps to be performed before
merging.
We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves
or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can
attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
### Code Sign-Off
We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
to certify they either authored the work themselves or otherwise have permission to use it in this project.
A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
Please see [the developer certificate](https://developercertificate.org) for more info,
as well as to make sure that you can attest to the rules listed.
Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
### Linting
Code must pass a linting stage, which uses [`golangci-lint`][lint].
The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run
automatically with VSCode by adding the following to your workspace or folder settings:
```json
"go.lintTool": "golangci-lint",
"go.lintOnSave": "package",
```
Additional editor [integrations options are also available][lint-ide].
Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root:
```shell
# use . or specify a path to only lint a package
# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
> golangci-lint run ./...
```
### Go Generate
The pipeline checks that auto-generated code, via `go generate`, is up to date.
This can be done for the entire repo:
```shell
> go generate ./...
```
## Code of Conduct
@@ -74,16 +30,8 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
## Special Thanks
Thanks to [natefinch][natefinch] for the inspiration for this library.
See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation.
[lint]: https://golangci-lint.run/
[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration
[lint-install]: https://golangci-lint.run/usage/install/#local-installation
[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s
[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff
[natefinch]: https://github.com/natefinch
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
for another named pipe implementation.

View File

@@ -1,41 +0,0 @@
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->
## Security
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
## Reporting Security Issues
**Please do not report security vulnerabilities through public GitHub issues.**
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue
This information will help us triage your report more quickly.
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
## Preferred Languages
We prefer all communications to be in English.
## Policy
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
<!-- END MICROSOFT SECURITY.MD BLOCK -->

View File

@@ -1,4 +1,3 @@
//go:build windows
// +build windows
package winio
@@ -8,12 +7,11 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"syscall"
"unicode/utf16"
"golang.org/x/sys/windows"
)
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
@@ -26,7 +24,7 @@ const (
BackupAlternateData
BackupLink
BackupPropertyData
BackupObjectId //revive:disable-line:var-naming ID, not Id
BackupObjectId
BackupReparseData
BackupSparseBlock
BackupTxfsData
@@ -36,16 +34,14 @@ const (
StreamSparseAttributes = uint32(8)
)
//nolint:revive // var-naming: ALL_CAPS
const (
WRITE_DAC = windows.WRITE_DAC
WRITE_OWNER = windows.WRITE_OWNER
ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY
WRITE_DAC = 0x40000
WRITE_OWNER = 0x80000
ACCESS_SYSTEM_SECURITY = 0x1000000
)
// BackupHeader represents a backup stream of a file.
type BackupHeader struct {
//revive:disable-next-line:var-naming ID, not Id
Id uint32 // The backup stream ID
Attributes uint32 // Stream attributes
Size int64 // The size of the stream in bytes
@@ -53,8 +49,8 @@ type BackupHeader struct {
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
}
type win32StreamID struct {
StreamID uint32
type win32StreamId struct {
StreamId uint32
Attributes uint32
Size uint64
NameSize uint32
@@ -75,7 +71,7 @@ func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
// it was not completely read.
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this
if r.bytesLeft > 0 {
if s, ok := r.r.(io.Seeker); ok {
// Make sure Seek on io.SeekCurrent sometimes succeeds
// before trying the actual seek.
@@ -86,16 +82,16 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
r.bytesLeft = 0
}
}
if _, err := io.Copy(io.Discard, r); err != nil {
if _, err := io.Copy(ioutil.Discard, r); err != nil {
return nil, err
}
}
var wsi win32StreamID
var wsi win32StreamId
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
return nil, err
}
hdr := &BackupHeader{
Id: wsi.StreamID,
Id: wsi.StreamId,
Attributes: wsi.Attributes,
Size: int64(wsi.Size),
}
@@ -106,7 +102,7 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
}
hdr.Name = syscall.UTF16ToString(name)
}
if wsi.StreamID == BackupSparseBlock {
if wsi.StreamId == BackupSparseBlock {
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
return nil, err
}
@@ -151,8 +147,8 @@ func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
return fmt.Errorf("missing %d bytes", w.bytesLeft)
}
name := utf16.Encode([]rune(hdr.Name))
wsi := win32StreamID{
StreamID: hdr.Id,
wsi := win32StreamId{
StreamId: hdr.Id,
Attributes: hdr.Attributes,
Size: uint64(hdr.Size),
NameSize: uint32(len(name) * 2),
@@ -207,7 +203,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
var bytesRead uint32
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
if err != nil {
return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err}
return 0, &os.PathError{"BackupRead", r.f.Name(), err}
}
runtime.KeepAlive(r.f)
if bytesRead == 0 {
@@ -220,7 +216,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
// the underlying file.
func (r *BackupFileReader) Close() error {
if r.ctx != 0 {
_ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
runtime.KeepAlive(r.f)
r.ctx = 0
}
@@ -246,7 +242,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
var bytesWritten uint32
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
if err != nil {
return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
}
runtime.KeepAlive(w.f)
if int(bytesWritten) != len(b) {
@@ -259,7 +255,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
// close the underlying file.
func (w *BackupFileWriter) Close() error {
if w.ctx != 0 {
_ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
runtime.KeepAlive(w.f)
w.ctx = 0
}
@@ -275,13 +271,7 @@ func OpenForBackup(path string, access uint32, share uint32, createmode uint32)
if err != nil {
return nil, err
}
h, err := syscall.CreateFile(&winPath[0],
access,
share,
nil,
createmode,
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT,
0)
h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
if err != nil {
err = &os.PathError{Op: "open", Path: path, Err: err}
return nil, err
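
For context, a hedged sketch of walking a file's backup streams with the helpers in this file (Windows-only; the path and access flags are illustrative, and `NewBackupFileReader` is assumed from the same package):

```go
//go:build windows

package main

import (
	"fmt"
	"io"
	"syscall"

	"github.com/Microsoft/go-winio"
)

func main() {
	f, err := winio.OpenForBackup(`C:\temp\example.txt`,
		syscall.GENERIC_READ, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Wrap the raw BackupRead stream and iterate over its sub-streams.
	bfr := winio.NewBackupFileReader(f, false /* includeSecurity */)
	defer bfr.Close()
	br := winio.NewBackupStreamReader(bfr)
	for {
		hdr, err := br.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("stream id=%d name=%q size=%d\n", hdr.Id, hdr.Name, hdr.Size)
	}
}
```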

View File

@@ -1,3 +1,4 @@
// +build !windows
// This file only exists to allow go get on non-Windows platforms.
package backuptar

View File

@@ -1,5 +1,3 @@
//go:build windows
package backuptar
import (

View File

@@ -1,4 +1,3 @@
//go:build windows
// +build windows
package backuptar
@@ -8,6 +7,7 @@ import (
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
@@ -18,18 +18,17 @@ import (
"golang.org/x/sys/windows"
)
//nolint:deadcode,varcheck // keep unused constants for potential future use
const (
cISUID = 0004000 // Set uid
cISGID = 0002000 // Set gid
cISVTX = 0001000 // Save text (sticky bit)
cISDIR = 0040000 // Directory
cISFIFO = 0010000 // FIFO
cISREG = 0100000 // Regular file
cISLNK = 0120000 // Symbolic link
cISBLK = 0060000 // Block special file
cISCHR = 0020000 // Character special file
cISSOCK = 0140000 // Socket
c_ISUID = 04000 // Set uid
c_ISGID = 02000 // Set gid
c_ISVTX = 01000 // Save text (sticky bit)
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
c_ISLNK = 0120000 // Symbolic link
c_ISBLK = 060000 // Block special file
c_ISCHR = 020000 // Character special file
c_ISSOCK = 0140000 // Socket
)
const (
@@ -45,7 +44,7 @@ const (
// zeroReader is an io.Reader that always returns 0s.
type zeroReader struct{}
func (zeroReader) Read(b []byte) (int, error) {
func (zr zeroReader) Read(b []byte) (int, error) {
for i := range b {
b[i] = 0
}
@@ -56,7 +55,7 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
curOffset := int64(0)
for {
bhdr, err := br.Next()
if err == io.EOF { //nolint:errorlint
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
@@ -72,8 +71,8 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
}
// archive/tar does not support writing sparse files
// so just write zeroes to catch up to the current offset.
if _, err = io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil {
return fmt.Errorf("seek to offset %d: %w", bhdr.Offset, err)
if _, err := io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil {
return fmt.Errorf("seek to offset %d: %s", bhdr.Offset, err)
}
if bhdr.Size == 0 {
// A sparse block with size = 0 is used to mark the end of the sparse blocks.
@@ -107,7 +106,7 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta
hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds()))
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
hdr.Mode |= cISDIR
hdr.Mode |= c_ISDIR
hdr.Size = 0
hdr.Typeflag = tar.TypeDir
}
@@ -117,29 +116,32 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta
// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file
// from the tar header and returns the security descriptor into a byte slice.
func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) {
if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
sd, err := base64.StdEncoding.DecodeString(sdraw)
// Maintaining old SDDL-based behavior for backward
// compatibility. All new tar headers written by this library
// will have raw binary for the security descriptor.
var sd []byte
var err error
if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
sd, err = winio.SddlToSecurityDescriptor(sddl)
if err != nil {
// Not returning sd as-is in the error-case, as base64.DecodeString
// may return partially decoded data (not nil or empty slice) in case
// of a failure: https://github.com/golang/go/blob/go1.17.7/src/encoding/base64/base64.go#L382-L387
return nil, err
}
return sd, nil
}
// Maintaining old SDDL-based behavior for backward compatibility. All new
// tar headers written by this library will have raw binary for the security
// descriptor.
if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
return winio.SddlToSecurityDescriptor(sddl)
if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
sd, err = base64.StdEncoding.DecodeString(sdraw)
if err != nil {
return nil, err
}
}
return nil, nil
return sd, nil
}
// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the
// current file from the tar header and returns it as a byte slice.
func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) {
var eas []winio.ExtendedAttribute //nolint:prealloc // len(eas) <= len(hdr.PAXRecords); prealloc is wasteful
var eas []winio.ExtendedAttribute
var eadata []byte
var err error
for k, v := range hdr.PAXRecords {
if !strings.HasPrefix(k, hdrEaPrefix) {
continue
@@ -153,15 +155,13 @@ func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) {
Value: data,
})
}
var eaData []byte
var err error
if len(eas) != 0 {
eaData, err = winio.EncodeExtendedAttributes(eas)
eadata, err = winio.EncodeExtendedAttributes(eas)
if err != nil {
return nil, err
}
}
return eaData, nil
return eadata, nil
}
// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header
@@ -182,9 +182,11 @@ func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte {
//
// The additional Win32 metadata is:
//
// - MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
// - MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
// - MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
//
// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
//
// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
name = filepath.ToSlash(name)
hdr := BasicInfoHeader(name, size, fileInfo)
@@ -207,7 +209,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
var dataHdr *winio.BackupHeader
for dataHdr == nil {
bhdr, err := br.Next()
if err == io.EOF { //nolint:errorlint
if err == io.EOF {
break
}
if err != nil {
@@ -215,21 +217,21 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
}
switch bhdr.Id {
case winio.BackupData:
hdr.Mode |= cISREG
hdr.Mode |= c_ISREG
if !readTwice {
dataHdr = bhdr
}
case winio.BackupSecurity:
sd, err := io.ReadAll(br)
sd, err := ioutil.ReadAll(br)
if err != nil {
return err
}
hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
case winio.BackupReparseData:
hdr.Mode |= cISLNK
hdr.Mode |= c_ISLNK
hdr.Typeflag = tar.TypeSymlink
reparseBuffer, _ := io.ReadAll(br)
reparseBuffer, err := ioutil.ReadAll(br)
rp, err := winio.DecodeReparsePoint(reparseBuffer)
if err != nil {
return err
@@ -240,7 +242,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
hdr.Linkname = rp.Target
case winio.BackupEaData:
eab, err := io.ReadAll(br)
eab, err := ioutil.ReadAll(br)
if err != nil {
return err
}
@@ -274,7 +276,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
}
for dataHdr == nil {
bhdr, err := br.Next()
if err == io.EOF { //nolint:errorlint
if err == io.EOF {
break
}
if err != nil {
@@ -309,7 +311,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
// range of the file containing the range contents. Finally there is a sparse block stream with
// size = 0 and offset = <file size>.
if dataHdr != nil { //nolint:nestif // todo: reduce nesting complexity
if dataHdr != nil {
// A data stream was found. Copy the data.
// We assume that we will either have a data stream size > 0 XOR have sparse block streams.
if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 {
@@ -317,13 +319,13 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
}
if _, err = io.Copy(t, br); err != nil {
return fmt.Errorf("%s: copying contents from data stream: %w", name, err)
return fmt.Errorf("%s: copying contents from data stream: %s", name, err)
}
} else if size > 0 {
// As of a recent OS change, BackupRead now returns a data stream for empty sparse files.
// These files have no sparse block streams, so skip the copySparse call if file size = 0.
if err = copySparse(t, br); err != nil {
return fmt.Errorf("%s: copying contents from sparse block stream: %w", name, err)
return fmt.Errorf("%s: copying contents from sparse block stream: %s", name, err)
}
}
}
@@ -333,7 +335,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
// been written. In practice, this means that we don't get EA or TXF metadata.
for {
bhdr, err := br.Next()
if err == io.EOF { //nolint:errorlint
if err == io.EOF {
break
}
if err != nil {
@@ -341,30 +343,35 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
}
switch bhdr.Id {
case winio.BackupAlternateData:
if (bhdr.Attributes & winio.StreamSparseAttributes) != 0 {
altName := bhdr.Name
if strings.HasSuffix(altName, ":$DATA") {
altName = altName[:len(altName)-len(":$DATA")]
}
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
hdr = &tar.Header{
Format: hdr.Format,
Name: name + altName,
Mode: hdr.Mode,
Typeflag: tar.TypeReg,
Size: bhdr.Size,
ModTime: hdr.ModTime,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
err = t.WriteHeader(hdr)
if err != nil {
return err
}
_, err = io.Copy(t, br)
if err != nil {
return err
}
} else {
// Unsupported for now, since the size of the alternate stream is not present
// in the backup stream until after the data has been read.
return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name)
}
altName := strings.TrimSuffix(bhdr.Name, ":$DATA")
hdr = &tar.Header{
Format: hdr.Format,
Name: name + altName,
Mode: hdr.Mode,
Typeflag: tar.TypeReg,
Size: bhdr.Size,
ModTime: hdr.ModTime,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
err = t.WriteHeader(hdr)
if err != nil {
return err
}
_, err = io.Copy(t, br)
if err != nil {
return err
}
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
default:
@@ -406,7 +413,7 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
}
fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano())
}
return name, size, fileInfo, err
return
}
// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
@@ -467,6 +474,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {

View File

@@ -1,22 +0,0 @@
// This package provides utilities for efficiently performing Win32 IO operations in Go.
// Currently, this package provides support for general IO and management of
// - named pipes
// - files
// - [Hyper-V sockets]
//
// This code is similar to Go's [net] package, and uses IO completion ports to avoid
// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
//
// This limits support to Windows Vista and newer operating systems.
//
// Additionally, this package provides support for:
// - creating and managing GUIDs
// - writing to [ETW]
// - opening and managing VHDs
// - parsing [Windows Image files]
// - auto-generating Win32 API code
//
// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
package winio

View File

@@ -33,7 +33,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
if err != nil {
err = errInvalidEaBuffer
return ea, nb, err
return
}
nameOffset := fileFullEaInformationSize
@@ -43,7 +43,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
nextOffset := int(info.NextEntryOffset)
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
err = errInvalidEaBuffer
return ea, nb, err
return
}
ea.Name = string(b[nameOffset : nameOffset+nameLen])
@@ -52,7 +52,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
if info.NextEntryOffset != 0 {
nb = b[info.NextEntryOffset:]
}
return ea, nb, err
return
}
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
@@ -67,7 +67,7 @@ func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
eas = append(eas, ea)
b = nb
}
return eas, err
return
}
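
A short round-trip sketch for the EA helpers above; the attribute name and value are made up:

```go
//go:build windows

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Encode one extended attribute into a FILE_FULL_EA_INFORMATION buffer...
	eas := []winio.ExtendedAttribute{{Name: "user.comment", Value: []byte("hello")}}
	buf, err := winio.EncodeExtendedAttributes(eas)
	if err != nil {
		panic(err)
	}
	// ...and decode it back out again.
	decoded, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s = %s\n", decoded[0].Name, decoded[0].Value)
}
```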
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {

View File

@@ -11,8 +11,6 @@ import (
"sync/atomic"
"syscall"
"time"
"golang.org/x/sys/windows"
)
//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
@@ -26,8 +24,6 @@ type atomicBool int32
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg
func (b *atomicBool) swap(new bool) bool {
var newInt int32
if new {
@@ -36,6 +32,11 @@ func (b *atomicBool) swap(new bool) bool {
return atomic.SwapInt32((*int32)(b), newInt) == 1
}
const (
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
)
var (
ErrFileClosed = errors.New("file has already been closed")
ErrTimeout = &timeoutError{}
@@ -43,28 +44,28 @@ var (
type timeoutError struct{}
func (*timeoutError) Error() string { return "i/o timeout" }
func (*timeoutError) Timeout() bool { return true }
func (*timeoutError) Temporary() bool { return true }
func (e *timeoutError) Error() string { return "i/o timeout" }
func (e *timeoutError) Timeout() bool { return true }
func (e *timeoutError) Temporary() bool { return true }
type timeoutChan chan struct{}
var ioInitOnce sync.Once
var ioCompletionPort syscall.Handle
// ioResult contains the result of an asynchronous IO operation.
// ioResult contains the result of an asynchronous IO operation
type ioResult struct {
bytes uint32
err error
}
// ioOperation represents an outstanding asynchronous Win32 IO.
// ioOperation represents an outstanding asynchronous Win32 IO
type ioOperation struct {
o syscall.Overlapped
ch chan ioResult
}
func initIO() {
func initIo() {
h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
if err != nil {
panic(err)
@@ -93,15 +94,15 @@ type deadlineHandler struct {
timedout atomicBool
}
// makeWin32File makes a new win32File from an existing file handle.
// makeWin32File makes a new win32File from an existing file handle
func makeWin32File(h syscall.Handle) (*win32File, error) {
f := &win32File{handle: h}
ioInitOnce.Do(initIO)
ioInitOnce.Do(initIo)
_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
if err != nil {
return nil, err
}
err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE)
err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
if err != nil {
return nil, err
}
@@ -120,14 +121,14 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
return f, nil
}
// closeHandle closes the resources associated with a Win32 handle.
// closeHandle closes the resources associated with a Win32 handle
func (f *win32File) closeHandle() {
f.wgLock.Lock()
// Atomically set that we are closing, releasing the resources only once.
if !f.closing.swap(true) {
f.wgLock.Unlock()
// cancel all IO and wait for it to complete
_ = cancelIoEx(f.handle, nil)
cancelIoEx(f.handle, nil)
f.wg.Wait()
// at this point, no new IO can start
syscall.Close(f.handle)
@@ -143,14 +144,14 @@ func (f *win32File) Close() error {
return nil
}
// IsClosed checks if the file has been closed.
// IsClosed checks if the file has been closed
func (f *win32File) IsClosed() bool {
return f.closing.isSet()
}
// prepareIO prepares for a new IO operation.
// prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIO() (*ioOperation, error) {
func (f *win32File) prepareIo() (*ioOperation, error) {
f.wgLock.RLock()
if f.closing.isSet() {
f.wgLock.RUnlock()
@@ -163,7 +164,7 @@ func (f *win32File) prepareIO() (*ioOperation, error) {
return c, nil
}
// ioCompletionProcessor processes completed async IOs forever.
// ioCompletionProcessor processes completed async IOs forever
func ioCompletionProcessor(h syscall.Handle) {
for {
var bytes uint32
@@ -177,17 +178,15 @@ func ioCompletionProcessor(h syscall.Handle) {
}
}
// todo: helsaawy - create an asyncIO version that takes a context
// asyncIO processes the return value from ReadFile or WriteFile, blocking until
// asyncIo processes the return value from ReadFile or WriteFile, blocking until
// the operation has actually completed.
func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
if err != syscall.ERROR_IO_PENDING {
return int(bytes), err
}
if f.closing.isSet() {
_ = cancelIoEx(f.handle, &c.o)
cancelIoEx(f.handle, &c.o)
}
var timeout timeoutChan
@@ -201,7 +200,7 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er
select {
case r = <-c.ch:
err = r.err
if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
if err == syscall.ERROR_OPERATION_ABORTED {
if f.closing.isSet() {
err = ErrFileClosed
}
@@ -211,10 +210,10 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er
err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
}
case <-timeout:
_ = cancelIoEx(f.handle, &c.o)
cancelIoEx(f.handle, &c.o)
r = <-c.ch
err = r.err
if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
if err == syscall.ERROR_OPERATION_ABORTED {
err = ErrTimeout
}
}
@@ -222,14 +221,13 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er
// runtime.KeepAlive is needed, as c is passed via native
// code to ioCompletionProcessor, c must remain alive
// until the channel read is complete.
// todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive?
runtime.KeepAlive(c)
return int(r.bytes), err
}
// Read reads from a file handle.
func (f *win32File) Read(b []byte) (int, error) {
c, err := f.prepareIO()
c, err := f.prepareIo()
if err != nil {
return 0, err
}
@@ -241,13 +239,13 @@ func (f *win32File) Read(b []byte) (int, error) {
var bytes uint32
err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
n, err := f.asyncIO(c, &f.readDeadline, bytes, err)
n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
runtime.KeepAlive(b)
// Handle EOF conditions.
if err == nil && n == 0 && len(b) != 0 {
return 0, io.EOF
} else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
} else if err == syscall.ERROR_BROKEN_PIPE {
return 0, io.EOF
} else {
return n, err
@@ -256,7 +254,7 @@ func (f *win32File) Read(b []byte) (int, error) {
// Write writes to a file handle.
func (f *win32File) Write(b []byte) (int, error) {
c, err := f.prepareIO()
c, err := f.prepareIo()
if err != nil {
return 0, err
}
@@ -268,7 +266,7 @@ func (f *win32File) Write(b []byte) (int, error) {
var bytes uint32
err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
n, err := f.asyncIO(c, &f.writeDeadline, bytes, err)
n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
runtime.KeepAlive(b)
return n, err
}

View File

@@ -1,4 +1,3 @@
//go:build windows
// +build windows
package winio
@@ -15,18 +14,13 @@ import (
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
FileAttributes uint32
_ uint32 // padding
pad uint32 // padding
}
// GetFileBasicInfo retrieves times and attributes for a file.
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
bi := &FileBasicInfo{}
if err := windows.GetFileInformationByHandleEx(
windows.Handle(f.Fd()),
windows.FileBasicInfo,
(*byte)(unsafe.Pointer(bi)),
uint32(unsafe.Sizeof(*bi)),
); err != nil {
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@@ -35,12 +29,7 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
// SetFileBasicInfo sets times and attributes for a file.
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
if err := windows.SetFileInformationByHandle(
windows.Handle(f.Fd()),
windows.FileBasicInfo,
(*byte)(unsafe.Pointer(bi)),
uint32(unsafe.Sizeof(*bi)),
); err != nil {
if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@@ -59,10 +48,7 @@ type FileStandardInfo struct {
// GetFileStandardInfo retrieves extended information for the file.
func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
si := &FileStandardInfo{}
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()),
windows.FileStandardInfo,
(*byte)(unsafe.Pointer(si)),
uint32(unsafe.Sizeof(*si))); err != nil {
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@@ -79,12 +65,7 @@ type FileIDInfo struct {
// GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) {
fileID := &FileIDInfo{}
if err := windows.GetFileInformationByHandleEx(
windows.Handle(f.Fd()),
windows.FileIdInfo,
(*byte)(unsafe.Pointer(fileID)),
uint32(unsafe.Sizeof(*fileID)),
); err != nil {
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
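
A minimal, hedged sketch of querying the info structures above (Windows-only; the path is illustrative):

```go
//go:build windows

package main

import (
	"fmt"
	"os"

	"github.com/Microsoft/go-winio"
)

func main() {
	f, err := os.Open(`C:\Windows\notepad.exe`)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Times and attributes for the open handle.
	bi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		panic(err)
	}
	// The unique (volume, file ID) pair for the same handle.
	id, err := winio.GetFileID(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("attributes=%#x id=%+v\n", bi.FileAttributes, *id)
}
```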

View File

@@ -4,8 +4,6 @@
package winio
import (
"context"
"errors"
"fmt"
"io"
"net"
@@ -14,87 +12,16 @@ import (
"time"
"unsafe"
"golang.org/x/sys/windows"
"github.com/Microsoft/go-winio/internal/socket"
"github.com/Microsoft/go-winio/pkg/guid"
)
const afHVSock = 34 // AF_HYPERV
//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
// Well known Service and VM IDs
//https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
const (
afHvSock = 34 // AF_HYPERV
// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
return guid.GUID{}
}
// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff
return guid.GUID{
Data1: 0xffffffff,
Data2: 0xffff,
Data3: 0xffff,
Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
}
}
// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector.
func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838
return guid.GUID{
Data1: 0xe0e16197,
Data2: 0xdd56,
Data3: 0x4a10,
Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38},
}
}
// HvsockGUIDSiloHost is the address of a silo's host partition:
// - The silo host of a hosted silo is the utility VM.
// - The silo host of a silo on a physical host is the physical host.
func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568
return guid.GUID{
Data1: 0x36bd0c5c,
Data2: 0x7276,
Data3: 0x4223,
Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68},
}
}
// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions.
func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd
return guid.GUID{
Data1: 0x90db8b89,
Data2: 0xd35,
Data3: 0x4f79,
Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd},
}
}
// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition.
// Listening on this VmId accepts connection from:
// - Inside silos: silo host partition.
// - Inside hosted silo: host of the VM.
// - Inside VM: VM host.
// - Physical host: Not supported.
func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878
return guid.GUID{
Data1: 0xa42e7cda,
Data2: 0xd03f,
Data3: 0x480c,
Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78},
}
}
// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol.
func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3
return guid.GUID{
Data2: 0xfacb,
Data3: 0x11e6,
Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3},
}
}
socketError = ^uintptr(0)
)
// An HvsockAddr is an address for a AF_HYPERV socket.
type HvsockAddr struct {
@@ -109,10 +36,8 @@ type rawHvsockAddr struct {
ServiceID guid.GUID
}
var _ socket.RawSockaddr = &rawHvsockAddr{}
// Network returns the address's network name, "hvsock".
func (*HvsockAddr) Network() string {
func (addr *HvsockAddr) Network() string {
return "hvsock"
}
@@ -122,14 +47,14 @@ func (addr *HvsockAddr) String() string {
// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
func VsockServiceID(port uint32) guid.GUID {
g := hvsockVsockServiceTemplate() // make a copy
g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
g.Data1 = port
return g
}
func (addr *HvsockAddr) raw() rawHvsockAddr {
return rawHvsockAddr{
Family: afHVSock,
Family: afHvSock,
VMID: addr.VMID,
ServiceID: addr.ServiceID,
}
@@ -140,48 +65,20 @@ func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
addr.ServiceID = raw.ServiceID
}
// Sockaddr returns a pointer to and the size of this struct.
//
// Implements the [socket.RawSockaddr] interface, and allows use in
// [socket.Bind] and [socket.ConnectEx].
func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) {
return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil
}
// Sockaddr interface allows use with `sockets.Bind()` and `.ConnectEx()`.
func (r *rawHvsockAddr) FromBytes(b []byte) error {
n := int(unsafe.Sizeof(rawHvsockAddr{}))
if len(b) < n {
return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize)
}
copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n])
if r.Family != afHVSock {
return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily)
}
return nil
}
// HvsockListener is a socket listener for the AF_HYPERV address family.
type HvsockListener struct {
sock *win32File
addr HvsockAddr
}
var _ net.Listener = &HvsockListener{}
// HvsockConn is a connected socket of the AF_HYPERV address family.
type HvsockConn struct {
sock *win32File
local, remote HvsockAddr
}
var _ net.Conn = &HvsockConn{}
func newHVSocket() (*win32File, error) {
fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1)
func newHvSocket() (*win32File, error) {
fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
if err != nil {
return nil, os.NewSyscallError("socket", err)
}
@@ -197,12 +94,12 @@ func newHVSocket() (*win32File, error) {
// ListenHvsock listens for connections on the specified hvsock address.
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
l := &HvsockListener{addr: *addr}
sock, err := newHVSocket()
sock, err := newHvSocket()
if err != nil {
return nil, l.opErr("listen", err)
}
sa := addr.raw()
err = socket.Bind(windows.Handle(sock.handle), &sa)
err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa)))
if err != nil {
return nil, l.opErr("listen", os.NewSyscallError("socket", err))
}
@@ -224,7 +121,7 @@ func (l *HvsockListener) Addr() net.Addr {
// Accept waits for the next connection and returns it.
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
sock, err := newHVSocket()
sock, err := newHvSocket()
if err != nil {
return nil, l.opErr("accept", err)
}
@@ -233,42 +130,27 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) {
sock.Close()
}
}()
c, err := l.sock.prepareIO()
c, err := l.sock.prepareIo()
if err != nil {
return nil, l.opErr("accept", err)
}
defer l.sock.wg.Done()
// AcceptEx, per documentation, requires an extra 16 bytes per address.
//
// https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex
const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
var addrbuf [addrlen * 2]byte
var bytes uint32
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o)
if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
_, err = l.sock.asyncIo(c, nil, bytes, err)
if err != nil {
return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
}
conn := &HvsockConn{
sock: sock,
}
// The local address returned in the AcceptEx buffer is the same as the Listener socket's
// address. However, the service GUID reported by GetSockName is different from the Listener's
// socket, and is sometimes the same as the local address of the socket that dialed the
// address, with the service GUID.Data1 incremented, but other times is different.
// todo: does the local address matter? is the listener's address or the actual address appropriate?
conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
// initialize the accepted socket and update its properties with those of the listening socket
if err = windows.Setsockopt(windows.Handle(sock.handle),
windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT,
(*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil {
return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err))
}
sock = nil
return conn, nil
}
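// Illustrative usage sketch (editor's addition, assuming the exported API shown
// above; not part of the vendored source): listen for hvsock connections from
// child partitions on VSOCK port 80 and accept one.
//
//	addr := &HvsockAddr{VMID: HvsockGUIDChildren(), ServiceID: VsockServiceID(80)}
//	l, err := ListenHvsock(addr)
//	if err != nil {
//		// handle error
//	}
//	conn, err := l.Accept()
//	_ = conn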
@@ -278,171 +160,43 @@ func (l *HvsockListener) Close() error {
return l.sock.Close()
}
// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]).
type HvsockDialer struct {
// Deadline is the time the Dial operation must connect before erroring.
Deadline time.Time
// Retries is the number of additional connects to try if the connection times out, is refused,
// or the host is unreachable
Retries uint
// RetryWait is the time to wait after a connection error to retry
RetryWait time.Duration
rt *time.Timer // redial wait timer
}
// Dial the Hyper-V socket at addr.
//
// See [HvsockDialer.Dial] for more information.
func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
return (&HvsockDialer{}).Dial(ctx, addr)
}
// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful.
// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between
// retries.
//
// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx.
func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
op := "dial"
// create the conn early to use opErr()
conn = &HvsockConn{
remote: *addr,
}
if !d.Deadline.IsZero() {
var cancel context.CancelFunc
ctx, cancel = context.WithDeadline(ctx, d.Deadline)
defer cancel()
}
// preemptive timeout/cancellation check
if err = ctx.Err(); err != nil {
return nil, conn.opErr(op, err)
}
sock, err := newHVSocket()
/* Need to finish ConnectEx handling
func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
sock, err := newHvSocket()
if err != nil {
return nil, conn.opErr(op, err)
return nil, err
}
defer func() {
if sock != nil {
sock.Close()
}
}()
sa := addr.raw()
err = socket.Bind(windows.Handle(sock.handle), &sa)
c, err := sock.prepareIo()
if err != nil {
return nil, conn.opErr(op, os.NewSyscallError("bind", err))
}
c, err := sock.prepareIO()
if err != nil {
return nil, conn.opErr(op, err)
return nil, err
}
defer sock.wg.Done()
var bytes uint32
for i := uint(0); i <= d.Retries; i++ {
err = socket.ConnectEx(
windows.Handle(sock.handle),
&sa,
nil, // sendBuf
0, // sendDataLen
&bytes,
(*windows.Overlapped)(unsafe.Pointer(&c.o)))
_, err = sock.asyncIO(c, nil, bytes, err)
if i < d.Retries && canRedial(err) {
if err = d.redialWait(ctx); err == nil {
continue
}
}
break
}
err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
_, err = sock.asyncIo(ctx, c, nil, bytes, err)
if err != nil {
return nil, conn.opErr(op, os.NewSyscallError("connectex", err))
return nil, err
}
// update the connection properties, so shutdown can be used
if err = windows.Setsockopt(
windows.Handle(sock.handle),
windows.SOL_SOCKET,
windows.SO_UPDATE_CONNECT_CONTEXT,
nil, // optvalue
0, // optlen
); err != nil {
return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err))
conn := &HvsockConn{
sock: sock,
remote: *addr,
}
// get the local name
var sal rawHvsockAddr
err = socket.GetSockName(windows.Handle(sock.handle), &sal)
if err != nil {
return nil, conn.opErr(op, os.NewSyscallError("getsockname", err))
}
conn.local.fromRaw(&sal)
// one last check for timeout, since asyncIO doesn't check the context
if err = ctx.Err(); err != nil {
return nil, conn.opErr(op, err)
}
conn.sock = sock
sock = nil
return conn, nil
}
// redialWait waits before attempting to redial, resetting the timer as appropriate.
func (d *HvsockDialer) redialWait(ctx context.Context) (err error) {
if d.RetryWait == 0 {
return nil
}
if d.rt == nil {
d.rt = time.NewTimer(d.RetryWait)
} else {
// should already be stopped and drained
d.rt.Reset(d.RetryWait)
}
select {
case <-ctx.Done():
case <-d.rt.C:
return nil
}
// stop and drain the timer
if !d.rt.Stop() {
<-d.rt.C
}
return ctx.Err()
}
// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall.
func canRedial(err error) bool {
//nolint:errorlint // guaranteed to be an Errno
switch err {
case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT,
windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL:
return true
default:
return false
}
}
*/
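// Illustrative usage sketch (editor's addition, not part of the vendored source):
// dialing with the retry-aware HvsockDialer shown above, assuming a target VM GUID
// `vmID` obtained elsewhere.
//
//	d := &HvsockDialer{Retries: 3, RetryWait: time.Second}
//	conn, err := d.Dial(ctx, &HvsockAddr{VMID: vmID, ServiceID: VsockServiceID(80)})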
func (conn *HvsockConn) opErr(op string, err error) error {
// translate from "file closed" to "socket closed"
if errors.Is(err, ErrFileClosed) {
err = socket.ErrSocketClosed
}
return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
}
func (conn *HvsockConn) Read(b []byte) (int, error) {
c, err := conn.sock.prepareIO()
c, err := conn.sock.prepareIo()
if err != nil {
return 0, conn.opErr("read", err)
}
@@ -450,11 +204,10 @@ func (conn *HvsockConn) Read(b []byte) (int, error) {
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
var flags, bytes uint32
err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err)
n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
if err != nil {
var eno windows.Errno
if errors.As(err, &eno) {
err = os.NewSyscallError("wsarecv", eno)
if _, ok := err.(syscall.Errno); ok {
err = os.NewSyscallError("wsarecv", err)
}
return 0, conn.opErr("read", err)
} else if n == 0 {
@@ -477,7 +230,7 @@ func (conn *HvsockConn) Write(b []byte) (int, error) {
}
func (conn *HvsockConn) write(b []byte) (int, error) {
c, err := conn.sock.prepareIO()
c, err := conn.sock.prepareIo()
if err != nil {
return 0, conn.opErr("write", err)
}
@@ -485,11 +238,10 @@ func (conn *HvsockConn) write(b []byte) (int, error) {
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
var bytes uint32
err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err)
n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
if err != nil {
var eno windows.Errno
if errors.As(err, &eno) {
err = os.NewSyscallError("wsasend", eno)
if _, ok := err.(syscall.Errno); ok {
err = os.NewSyscallError("wsasend", err)
}
return 0, conn.opErr("write", err)
}
@@ -505,19 +257,13 @@ func (conn *HvsockConn) IsClosed() bool {
return conn.sock.IsClosed()
}
// shutdown disables sending or receiving on a socket.
func (conn *HvsockConn) shutdown(how int) error {
if conn.IsClosed() {
return socket.ErrSocketClosed
return ErrFileClosed
}
err := syscall.Shutdown(conn.sock.handle, how)
if err != nil {
// If the connection was closed, shutdowns fail with "not connected"
if errors.Is(err, windows.WSAENOTCONN) ||
errors.Is(err, windows.WSAESHUTDOWN) {
err = socket.ErrSocketClosed
}
return os.NewSyscallError("shutdown", err)
}
return nil
@@ -527,7 +273,7 @@ func (conn *HvsockConn) shutdown(how int) error {
func (conn *HvsockConn) CloseRead() error {
err := conn.shutdown(syscall.SHUT_RD)
if err != nil {
return conn.opErr("closeread", err)
return conn.opErr("close", err)
}
return nil
}
@@ -537,7 +283,7 @@ func (conn *HvsockConn) CloseRead() error {
func (conn *HvsockConn) CloseWrite() error {
err := conn.shutdown(syscall.SHUT_WR)
if err != nil {
return conn.opErr("closewrite", err)
return conn.opErr("close", err)
}
return nil
}
@@ -554,13 +300,8 @@ func (conn *HvsockConn) RemoteAddr() net.Addr {
// SetDeadline implements the net.Conn SetDeadline method.
func (conn *HvsockConn) SetDeadline(t time.Time) error {
// todo: implement `SetDeadline` for `win32File`
if err := conn.SetReadDeadline(t); err != nil {
return fmt.Errorf("set read deadline: %w", err)
}
if err := conn.SetWriteDeadline(t); err != nil {
return fmt.Errorf("set write deadline: %w", err)
}
conn.SetReadDeadline(t)
conn.SetWriteDeadline(t)
return nil
}


@@ -1,20 +0,0 @@
package socket
import (
"unsafe"
)
// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The
// struct must meet the Win32 sockaddr requirements specified here:
// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2
//
// Specifically, the struct size must be at least large enough to hold an int16 (unsigned short)
// for the address family.
type RawSockaddr interface {
// Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing
// for the RawSockaddr's data to be overwritten by syscalls (if necessary).
//
// It is the caller's responsibility to validate that the values are valid; invalid
// pointers or size can cause a panic.
Sockaddr() (unsafe.Pointer, int32, error)
}


@@ -1,179 +0,0 @@
//go:build windows
package socket
import (
"errors"
"fmt"
"net"
"sync"
"syscall"
"unsafe"
"github.com/Microsoft/go-winio/pkg/guid"
"golang.org/x/sys/windows"
)
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go
//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname
//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername
//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
const socketError = uintptr(^uint32(0))
var (
// todo(helsaawy): create custom error types to store the desired vs actual size and addr family?
ErrBufferSize = errors.New("buffer size")
ErrAddrFamily = errors.New("address family")
ErrInvalidPointer = errors.New("invalid pointer")
ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed)
)
// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error)
// GetSockName writes the local address of socket s to the [RawSockaddr] rsa.
// If rsa is not large enough, the [windows.WSAEFAULT] is returned.
func GetSockName(s windows.Handle, rsa RawSockaddr) error {
ptr, l, err := rsa.Sockaddr()
if err != nil {
return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
}
// although getsockname returns WSAEFAULT if the buffer is too small, it does not set
// &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy
return getsockname(s, ptr, &l)
}
// GetPeerName returns the remote address the socket is connected to.
//
// See [GetSockName] for more information.
func GetPeerName(s windows.Handle, rsa RawSockaddr) error {
ptr, l, err := rsa.Sockaddr()
if err != nil {
return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
}
return getpeername(s, ptr, &l)
}
func Bind(s windows.Handle, rsa RawSockaddr) (err error) {
ptr, l, err := rsa.Sockaddr()
if err != nil {
return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
}
return bind(s, ptr, l)
}
// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the
// their sockaddr interface, so they cannot be used with HvsockAddr
// Replicate functionality here from
// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go
// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at
// runtime via a WSAIoctl call:
// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks
type runtimeFunc struct {
id guid.GUID
once sync.Once
addr uintptr
err error
}
func (f *runtimeFunc) Load() error {
f.once.Do(func() {
var s windows.Handle
s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP)
if f.err != nil {
return
}
defer windows.CloseHandle(s) //nolint:errcheck
var n uint32
f.err = windows.WSAIoctl(s,
windows.SIO_GET_EXTENSION_FUNCTION_POINTER,
(*byte)(unsafe.Pointer(&f.id)),
uint32(unsafe.Sizeof(f.id)),
(*byte)(unsafe.Pointer(&f.addr)),
uint32(unsafe.Sizeof(f.addr)),
&n,
nil, //overlapped
0, //completionRoutine
)
})
return f.err
}
var (
// todo: add `AcceptEx` and `GetAcceptExSockaddrs`
WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS
Data1: 0x25a207b9,
Data2: 0xddf3,
Data3: 0x4660,
Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
}
connectExFunc = runtimeFunc{id: WSAID_CONNECTEX}
)
func ConnectEx(
fd windows.Handle,
rsa RawSockaddr,
sendBuf *byte,
sendDataLen uint32,
bytesSent *uint32,
overlapped *windows.Overlapped,
) error {
if err := connectExFunc.Load(); err != nil {
return fmt.Errorf("failed to load ConnectEx function pointer: %w", err)
}
ptr, n, err := rsa.Sockaddr()
if err != nil {
return err
}
return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped)
}
// BOOL LpfnConnectex(
// [in] SOCKET s,
// [in] const sockaddr *name,
// [in] int namelen,
// [in, optional] PVOID lpSendBuffer,
// [in] DWORD dwSendDataLength,
// [out] LPDWORD lpdwBytesSent,
// [in] LPOVERLAPPED lpOverlapped
// )
func connectEx(
s windows.Handle,
name unsafe.Pointer,
namelen int32,
sendBuf *byte,
sendDataLen uint32,
bytesSent *uint32,
overlapped *windows.Overlapped,
) (err error) {
// todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN
r1, _, e1 := syscall.Syscall9(connectExFunc.addr,
7,
uintptr(s),
uintptr(name),
uintptr(namelen),
uintptr(unsafe.Pointer(sendBuf)),
uintptr(sendDataLen),
uintptr(unsafe.Pointer(bytesSent)),
uintptr(unsafe.Pointer(overlapped)),
0,
0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return err
}


@@ -1,72 +0,0 @@
//go:build windows
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package socket
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
errERROR_EINVAL error = syscall.EINVAL
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return errERROR_EINVAL
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
procbind = modws2_32.NewProc("bind")
procgetpeername = modws2_32.NewProc("getpeername")
procgetsockname = modws2_32.NewProc("getsockname")
)
func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) {
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socketError {
err = errnoErr(e1)
}
return
}
func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
if r1 == socketError {
err = errnoErr(e1)
}
return
}
func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
if r1 == socketError {
err = errnoErr(e1)
}
return
}


@@ -1,4 +1,3 @@
//go:build windows
// +build windows
package winio
@@ -14,8 +13,6 @@ import (
"syscall"
"time"
"unsafe"
"golang.org/x/sys/windows"
)
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
@@ -24,10 +21,10 @@ import (
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile
//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl
type ioStatusBlock struct {
Status, Information uintptr
@@ -54,22 +51,45 @@ type securityDescriptor struct {
Control uint16
Owner uintptr
Group uintptr
Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl
Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl
Sacl uintptr
Dacl uintptr
}
type ntStatus int32
type ntstatus int32
func (status ntStatus) Err() error {
func (status ntstatus) Err() error {
if status >= 0 {
return nil
}
return rtlNtStatusToDosError(status)
}
const (
cERROR_PIPE_BUSY = syscall.Errno(231)
cERROR_NO_DATA = syscall.Errno(232)
cERROR_PIPE_CONNECTED = syscall.Errno(535)
cERROR_SEM_TIMEOUT = syscall.Errno(121)
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
cPIPE_TYPE_MESSAGE = 4
cPIPE_READMODE_MESSAGE = 2
cFILE_OPEN = 1
cFILE_CREATE = 2
cFILE_PIPE_MESSAGE_TYPE = 1
cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2
cSE_DACL_PRESENT = 4
)
var (
// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
ErrPipeListenerClosed = net.ErrClosed
// This error should match net.errClosing since docker takes a dependency on its text.
ErrPipeListenerClosed = errors.New("use of closed network connection")
errPipeWriteClosed = errors.New("pipe has been closed for write")
)
@@ -96,10 +116,9 @@ func (f *win32Pipe) RemoteAddr() net.Addr {
}
func (f *win32Pipe) SetDeadline(t time.Time) error {
if err := f.SetReadDeadline(t); err != nil {
return err
}
return f.SetWriteDeadline(t)
f.SetReadDeadline(t)
f.SetWriteDeadline(t)
return nil
}
// CloseWrite closes the write side of a message pipe in byte mode.
@@ -138,14 +157,14 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
return 0, io.EOF
}
n, err := f.win32File.Read(b)
if err == io.EOF { //nolint:errorlint
if err == io.EOF {
// If this was the result of a zero-byte read, then
// it is possible that the read was due to a zero-size
// message. Since we are simulating CloseWrite with a
// zero-byte message, ensure that all future Read() calls
// also return EOF.
f.readEOF = true
} else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno
} else if err == syscall.ERROR_MORE_DATA {
// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
// and the message still has more bytes. Treat this as a success, since
// this package presents all named pipes as byte streams.
@@ -154,7 +173,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
return n, err
}
func (pipeAddress) Network() string {
func (s pipeAddress) Network() string {
return "pipe"
}
@@ -165,21 +184,16 @@ func (s pipeAddress) String() string {
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
for {
select {
case <-ctx.Done():
return syscall.Handle(0), ctx.Err()
default:
h, err := createFile(*path,
access,
0,
nil,
syscall.OPEN_EXISTING,
windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS,
0)
h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err == nil {
return h, nil
}
if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno
if err != cERROR_PIPE_BUSY {
return h, &os.PathError{Err: err, Op: "open", Path: *path}
}
// Wait 10 msec and try again. This is a rather simplistic
@@ -199,10 +213,9 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
} else {
absTimeout = time.Now().Add(2 * time.Second)
}
ctx, cancel := context.WithDeadline(context.Background(), absTimeout)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), absTimeout)
conn, err := DialPipeContext(ctx, path)
if errors.Is(err, context.DeadlineExceeded) {
if err == context.DeadlineExceeded {
return nil, ErrTimeout
}
return conn, err
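// Illustrative usage (editor's addition, not part of the vendored source):
// dial a named pipe with an explicit 2-second timeout via DialPipe above.
//
//	t := 2 * time.Second
//	conn, err := DialPipe(`\\.\pipe\example`, &t)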
@@ -238,7 +251,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn,
// If the pipe is in message mode, return a message byte pipe, which
// supports CloseWrite().
if flags&windows.PIPE_TYPE_MESSAGE != 0 {
if flags&cPIPE_TYPE_MESSAGE != 0 {
return &win32MessageBytePipe{
win32Pipe: win32Pipe{win32File: f, path: path},
}, nil
@@ -270,11 +283,7 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
oa.Length = unsafe.Sizeof(oa)
var ntPath unicodeString
if err := rtlDosPathNameToNtPathName(&path16[0],
&ntPath,
0,
0,
).Err(); err != nil {
if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
defer localFree(ntPath.Buffer)
@@ -283,8 +292,8 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
// The security descriptor is only needed for the first pipe.
if first {
if sd != nil {
l := uint32(len(sd))
sdb := localAlloc(0, l)
len := uint32(len(sd))
sdb := localAlloc(0, len)
defer localFree(sdb)
copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
@@ -292,28 +301,28 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
// Construct the default named pipe security descriptor.
var dacl uintptr
if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
return 0, fmt.Errorf("getting default named pipe ACL: %w", err)
return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
}
defer localFree(dacl)
sdb := &securityDescriptor{
Revision: 1,
Control: windows.SE_DACL_PRESENT,
Control: cSE_DACL_PRESENT,
Dacl: dacl,
}
oa.SecurityDescriptor = sdb
}
}
typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS)
typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
if c.MessageMode {
typ |= windows.FILE_PIPE_MESSAGE_TYPE
typ |= cFILE_PIPE_MESSAGE_TYPE
}
disposition := uint32(windows.FILE_OPEN)
disposition := uint32(cFILE_OPEN)
access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
if first {
disposition = windows.FILE_CREATE
disposition = cFILE_CREATE
// By not asking for read or write access, the named pipe file system
// will put this pipe into an initially disconnected state, blocking
// client connections until the next call with first == false.
@@ -326,20 +335,7 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
h syscall.Handle
iosb ioStatusBlock
)
err = ntCreateNamedPipeFile(&h,
access,
&oa,
&iosb,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE,
disposition,
0,
typ,
0,
0,
0xffffffff,
uint32(c.InputBufferSize),
uint32(c.OutputBufferSize),
&timeout).Err()
err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
if err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
@@ -384,7 +380,7 @@ func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
p.Close()
p = nil
err = <-ch
if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno
if err == nil || err == ErrFileClosed {
err = ErrPipeListenerClosed
}
}
@@ -406,12 +402,12 @@ func (l *win32PipeListener) listenerRoutine() {
p, err = l.makeConnectedServerPipe()
// If the connection was immediately closed by the client, try
// again.
if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno
if err != cERROR_NO_DATA {
break
}
}
responseCh <- acceptResponse{p, err}
closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno
closed = err == ErrPipeListenerClosed
}
}
syscall.Close(l.firstHandle)
@@ -473,15 +469,15 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
}
func connectPipe(p *win32File) error {
c, err := p.prepareIO()
c, err := p.prepareIo()
if err != nil {
return err
}
defer p.wg.Done()
err = connectNamedPipe(p.handle, &c.o)
_, err = p.asyncIO(c, nil, 0, err)
if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno
_, err = p.asyncIo(c, nil, 0, err)
if err != nil && err != cERROR_PIPE_CONNECTED {
return err
}
return nil


@@ -1,3 +1,5 @@
// +build windows
// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,
@@ -7,26 +9,24 @@ package guid
import (
"crypto/rand"
"crypto/sha1" //nolint:gosec // not used for secure application
"crypto/sha1"
"encoding"
"encoding/binary"
"fmt"
"strconv"
)
//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment
// Variant specifies which GUID variant (or "type") of the GUID. It determines
// how the entirety of the rest of the GUID is interpreted.
type Variant uint8
// The variants specified by RFC 4122 section 4.1.1.
// The variants specified by RFC 4122.
const (
// VariantUnknown specifies a GUID variant which does not conform to one of
// the variant encodings specified in RFC 4122.
VariantUnknown Variant = iota
VariantNCS
VariantRFC4122 // RFC 4122
VariantRFC4122
VariantMicrosoft
VariantFuture
)
@@ -36,10 +36,6 @@ const (
// hash of an input string.
type Version uint8
func (v Version) String() string {
return strconv.FormatUint(uint64(v), 10)
}
var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})
@@ -65,7 +61,7 @@ func NewV4() (GUID, error) {
// big-endian UTF16 stream of bytes. If that is desired, the string can be
// encoded as such before being passed to this function.
func NewV5(namespace GUID, name []byte) (GUID, error) {
b := sha1.New() //nolint:gosec // not used for secure application
b := sha1.New()
namespaceBytes := namespace.ToArray()
b.Write(namespaceBytes[:])
b.Write(name)


@@ -1,4 +1,3 @@
//go:build !windows
// +build !windows
package guid


@@ -1,6 +1,3 @@
//go:build windows
// +build windows
package guid
import "golang.org/x/sys/windows"


@@ -1,27 +0,0 @@
// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT.
package guid
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[VariantUnknown-0]
_ = x[VariantNCS-1]
_ = x[VariantRFC4122-2]
_ = x[VariantMicrosoft-3]
_ = x[VariantFuture-4]
}
const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture"
var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33}
func (i Variant) String() string {
if i >= Variant(len(_Variant_index)-1) {
return "Variant(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Variant_name[_Variant_index[i]:_Variant_index[i+1]]
}


@@ -1,4 +1,3 @@
//go:build windows
// +build windows
package security
@@ -21,7 +20,6 @@ type (
trusteeForm uint32
trusteeType uint32
//nolint:structcheck // structcheck thinks fields are unused, but they are used to pass data to OS
explicitAccess struct {
accessPermissions accessMask
accessMode accessMode
@@ -29,7 +27,6 @@ type (
trustee trustee
}
//nolint:structcheck,unused // structcheck thinks fields are unused, but they are used to pass data to OS
trustee struct {
multipleTrustee *trustee
multipleTrusteeOperation int32
@@ -47,7 +44,6 @@ const (
desiredAccessReadControl desiredAccess = 0x20000
desiredAccessWriteDac desiredAccess = 0x40000
//cspell:disable-next-line
gvmga = "GrantVmGroupAccess:"
inheritModeNoInheritance inheritMode = 0x0
@@ -60,9 +56,9 @@ const (
shareModeRead shareMode = 0x1
shareModeWrite shareMode = 0x2
sidVMGroup = "S-1-5-83-0"
sidVmGroup = "S-1-5-83-0"
trusteeFormIsSID trusteeForm = 0
trusteeFormIsSid trusteeForm = 0
trusteeTypeWellKnownGroup trusteeType = 5
)
@@ -71,8 +67,6 @@ const (
// include Grant ACE entries for the VM Group SID. This is a golang re-
// implementation of the same function in vmcompute, just not exported in
// RS5. Which kind of sucks. Sucks a lot :/
//
//revive:disable-next-line:var-naming VM, not Vm
func GrantVmGroupAccess(name string) error {
// Stat (to determine if `name` is a directory).
s, err := os.Stat(name)
@@ -85,7 +79,7 @@ func GrantVmGroupAccess(name string) error {
if err != nil {
return err // Already wrapped
}
defer syscall.CloseHandle(fd) //nolint:errcheck
defer syscall.CloseHandle(fd)
// Get the current DACL and Security Descriptor. Must defer LocalFree on success.
ot := objectTypeFileObject
@@ -95,7 +89,7 @@ func GrantVmGroupAccess(name string) error {
if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err)
}
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) //nolint:errcheck
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))
// Generate a new DACL which is the current DACL with the required ACEs added.
// Must defer LocalFree on success.
@@ -103,7 +97,7 @@ func GrantVmGroupAccess(name string) error {
if err != nil {
return err // Already wrapped
}
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) //nolint:errcheck
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL)))
// And finally use SetSecurityInfo to apply the updated DACL.
if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
@@ -116,19 +110,16 @@ func GrantVmGroupAccess(name string) error {
// createFile is a helper function to call [Nt]CreateFile to get a handle to
// the file or directory.
func createFile(name string, isDir bool) (syscall.Handle, error) {
namep, err := syscall.UTF16FromString(name)
if err != nil {
return syscall.InvalidHandle, fmt.Errorf("could not convernt name to UTF-16: %w", err)
}
namep := syscall.StringToUTF16(name)
da := uint32(desiredAccessReadControl | desiredAccessWriteDac)
sm := uint32(shareModeRead | shareModeWrite)
fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL)
if isDir {
fa |= syscall.FILE_FLAG_BACKUP_SEMANTICS
fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS)
}
fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
if err != nil {
return syscall.InvalidHandle, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err)
return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err)
}
return fd, nil
}
@@ -137,9 +128,9 @@ func createFile(name string, isDir bool) (syscall.Handle, error) {
// The caller is responsible for LocalFree of the returned DACL on success.
func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) {
// Generate pointers to the SIDs based on the string SIDs
sid, err := syscall.StringToSid(sidVMGroup)
sid, err := syscall.StringToSid(sidVmGroup)
if err != nil {
return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVMGroup, err)
return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err)
}
inheritance := inheritModeNoInheritance
@@ -148,12 +139,12 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp
}
eaArray := []explicitAccess{
{
explicitAccess{
accessPermissions: accessMaskDesiredPermission,
accessMode: accessModeGrant,
inheritance: inheritance,
trustee: trustee{
trusteeForm: trusteeFormIsSID,
trusteeForm: trusteeFormIsSid,
trusteeType: trusteeTypeWellKnownGroup,
name: uintptr(unsafe.Pointer(sid)),
},


@@ -1,6 +1,6 @@
package security
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go syscall_windows.go
//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go
//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo
//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo


@@ -1,6 +1,4 @@
//go:build windows
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
// Code generated by 'go generate'; DO NOT EDIT.
package security


@@ -1,4 +1,3 @@
//go:build windows
// +build windows
package winio
@@ -25,17 +24,22 @@ import (
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
const (
//revive:disable-next-line:var-naming ALL_CAPS
SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED
SE_PRIVILEGE_ENABLED = 2
//revive:disable-next-line:var-naming ALL_CAPS
ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
SeBackupPrivilege = "SeBackupPrivilege"
SeRestorePrivilege = "SeRestorePrivilege"
SeSecurityPrivilege = "SeSecurityPrivilege"
)
const (
securityAnonymous = iota
securityIdentification
securityImpersonation
securityDelegation
)
var (
privNames = make(map[string]uint64)
privNameMutex sync.Mutex
@@ -47,9 +51,11 @@ type PrivilegeError struct {
}
func (e *PrivilegeError) Error() string {
s := "Could not enable privilege "
s := ""
if len(e.privileges) > 1 {
s = "Could not enable privileges "
} else {
s = "Could not enable privilege "
}
for i, p := range e.privileges {
if i != 0 {
@@ -88,7 +94,7 @@ func RunWithPrivileges(names []string, fn func() error) error {
}
func mapPrivileges(names []string) ([]uint64, error) {
privileges := make([]uint64, 0, len(names))
var privileges []uint64
privNameMutex.Lock()
defer privNameMutex.Unlock()
for _, name := range names {
@@ -121,7 +127,7 @@ func enableDisableProcessPrivilege(names []string, action uint32) error {
return err
}
p := windows.CurrentProcess()
p, _ := windows.GetCurrentProcess()
var token windows.Token
err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
if err != nil {
@@ -134,10 +140,10 @@ func enableDisableProcessPrivilege(names []string, action uint32) error {
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
var b bytes.Buffer
_ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
for _, p := range privileges {
_ = binary.Write(&b, binary.LittleEndian, p)
_ = binary.Write(&b, binary.LittleEndian, action)
binary.Write(&b, binary.LittleEndian, p)
binary.Write(&b, binary.LittleEndian, action)
}
prevState := make([]byte, b.Len())
reqSize := uint32(0)
@@ -145,7 +151,7 @@ func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) e
if !success {
return err
}
if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno
if err == ERROR_NOT_ALL_ASSIGNED {
return &PrivilegeError{privileges}
}
return nil
@@ -171,7 +177,7 @@ func getPrivilegeName(luid uint64) string {
}
func newThreadToken() (windows.Token, error) {
err := impersonateSelf(windows.SecurityImpersonation)
err := impersonateSelf(securityImpersonation)
if err != nil {
return 0, err
}

View File

@@ -1,6 +1,3 @@
//go:build windows
// +build windows
package winio
import (
@@ -116,16 +113,16 @@ func EncodeReparsePoint(rp *ReparsePoint) []byte {
}
var b bytes.Buffer
_ = binary.Write(&b, binary.LittleEndian, &data)
binary.Write(&b, binary.LittleEndian, &data)
if !rp.IsMountPoint {
flags := uint32(0)
if relative {
flags |= 1
}
_ = binary.Write(&b, binary.LittleEndian, flags)
binary.Write(&b, binary.LittleEndian, flags)
}
_ = binary.Write(&b, binary.LittleEndian, ntTarget16)
_ = binary.Write(&b, binary.LittleEndian, target16)
binary.Write(&b, binary.LittleEndian, ntTarget16)
binary.Write(&b, binary.LittleEndian, target16)
return b.Bytes()
}


@@ -1,25 +1,23 @@
//go:build windows
// +build windows
package winio
import (
"errors"
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
//sys localFree(mem uintptr) = LocalFree
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
const (
cERROR_NONE_MAPPED = syscall.Errno(1332)
)
type AccountLookupError struct {
Name string
Err error
@@ -30,10 +28,8 @@ func (e *AccountLookupError) Error() string {
return "lookup account: empty account name specified"
}
var s string
switch {
case errors.Is(e.Err, windows.ERROR_INVALID_SID):
s = "the security ID structure is invalid"
case errors.Is(e.Err, windows.ERROR_NONE_MAPPED):
switch e.Err {
case cERROR_NONE_MAPPED:
s = "not found"
default:
s = e.Err.Error()
@@ -41,8 +37,6 @@ func (e *AccountLookupError) Error() string {
return "lookup account " + e.Name + ": " + s
}
func (e *AccountLookupError) Unwrap() error { return e.Err }
type SddlConversionError struct {
Sddl string
Err error
@@ -52,19 +46,15 @@ func (e *SddlConversionError) Error() string {
return "convert " + e.Sddl + ": " + e.Err.Error()
}
func (e *SddlConversionError) Unwrap() error { return e.Err }
// LookupSidByName looks up the SID of an account by name
//
//revive:disable-next-line:var-naming SID, not Sid
func LookupSidByName(name string) (sid string, err error) {
if name == "" {
return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED}
return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
}
var sidSize, sidNameUse, refDomainSize uint32
err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
return "", &AccountLookupError{name, err}
}
sidBuffer := make([]byte, sidSize)
@@ -83,42 +73,6 @@ func LookupSidByName(name string) (sid string, err error) {
return sid, nil
}
// LookupNameBySid looks up the name of an account by SID
//
//revive:disable-next-line:var-naming SID, not Sid
func LookupNameBySid(sid string) (name string, err error) {
if sid == "" {
return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED}
}
sidBuffer, err := windows.UTF16PtrFromString(sid)
if err != nil {
return "", &AccountLookupError{sid, err}
}
var sidPtr *byte
if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil {
return "", &AccountLookupError{sid, err}
}
defer localFree(uintptr(unsafe.Pointer(sidPtr)))
var nameSize, refDomainSize, sidNameUse uint32
err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse)
if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
return "", &AccountLookupError{sid, err}
}
nameBuffer := make([]uint16, nameSize)
refDomainBuffer := make([]uint16, refDomainSize)
err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
if err != nil {
return "", &AccountLookupError{sid, err}
}
name = windows.UTF16ToString(nameBuffer)
return name, nil
}
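// Illustrative usage (editor's addition, not part of the vendored source):
// resolve a well-known account name to its string SID with the helper above.
//
//	sid, err := LookupSidByName("Everyone") // "S-1-1-0" on success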
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
var sdBuffer uintptr
err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
@@ -133,7 +87,7 @@ func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
func SecurityDescriptorToSddl(sd []byte) (string, error) {
var sddl *uint16
// The returned string length seems to include an arbitrary number of terminating NULs.
// The returned string length seems to including an aribtrary number of terminating NULs.
// Don't use it.
err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
if err != nil {


@@ -1,5 +1,3 @@
//go:build windows
package winio
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go
//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go


@@ -1,5 +0,0 @@
//go:build tools
package winio
import _ "golang.org/x/tools/cmd/stringer"


@@ -11,7 +11,7 @@ import (
"golang.org/x/sys/windows"
)
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zvhd_windows.go vhd.go
//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go
//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
@@ -62,8 +62,8 @@ type OpenVirtualDiskParameters struct {
Version2 OpenVersion2
}
// The higher level `OpenVersion2` struct uses `bool`s to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However,
// the internal windows structure uses `BOOL`s aka int32s for these types. `openVersion2` is used for translating
// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However,
// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating
// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods.
type openVersion2 struct {
getInfoOnly int32
@@ -87,10 +87,9 @@ type AttachVirtualDiskParameters struct {
}
const (
//revive:disable-next-line:var-naming ALL_CAPS
VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3
// Access Mask for opening a VHD.
// Access Mask for opening a VHD
VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000
VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000
VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000
@@ -102,7 +101,7 @@ const (
VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000
VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000
// Flags for creating a VHD.
// Flags for creating a VHD
CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0
CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1
CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2
@@ -110,12 +109,12 @@ const (
CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8
CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10
CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20
CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 //revive:disable-line:var-naming VHD, not Vhd
CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40
CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80
CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 //revive:disable-line:var-naming PMEM, not Pmem
CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100
CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200
// Flags for opening a VHD.
// Flags for opening a VHD
OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000
OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001
OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002
@@ -128,7 +127,7 @@ const (
OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100
OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200
// Flags for attaching a VHD.
// Flags for attaching a VHD
AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000
AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001
AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002
@@ -141,14 +140,12 @@ const (
AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100
AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200
// Flags for detaching a VHD.
// Flags for detaching a VHD
DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0
)
// CreateVhdx is a helper function to create a simple vhdx file at the given path using
// default values.
//
//revive:disable-next-line:var-naming VHDX, not Vhdx
func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
params := CreateVirtualDiskParameters{
Version: 2,
@@ -175,8 +172,6 @@ func DetachVirtualDisk(handle syscall.Handle) (err error) {
}
// DetachVhd detaches a vhd found at `path`.
//
//revive:disable-next-line:var-naming VHD, not Vhd
func DetachVhd(path string) error {
handle, err := OpenVirtualDisk(
path,
@@ -186,16 +181,12 @@ func DetachVhd(path string) error {
if err != nil {
return err
}
defer syscall.CloseHandle(handle) //nolint:errcheck
defer syscall.CloseHandle(handle)
return DetachVirtualDisk(handle)
}
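// Illustrative usage (editor's addition, not part of the vendored source;
// the path is hypothetical): detach a previously attached VHD by path.
//
//	if err := DetachVhd(`C:\layers\base.vhdx`); err != nil {
//		// handle error
//	}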
// AttachVirtualDisk attaches a virtual hard disk for use.
func AttachVirtualDisk(
handle syscall.Handle,
attachVirtualDiskFlag AttachVirtualDiskFlag,
parameters *AttachVirtualDiskParameters,
) (err error) {
func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) {
// Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5.
if err := attachVirtualDisk(
handle,
@@ -212,8 +203,6 @@ func AttachVirtualDisk(
// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2
// of the ATTACH_VIRTUAL_DISK_PARAMETERS.
//
//revive:disable-next-line:var-naming VHD, not Vhd
func AttachVhd(path string) (err error) {
handle, err := OpenVirtualDisk(
path,
@@ -224,7 +213,7 @@ func AttachVhd(path string) (err error) {
return err
}
defer syscall.CloseHandle(handle) //nolint:errcheck
defer syscall.CloseHandle(handle)
params := AttachVirtualDiskParameters{Version: 2}
if err := AttachVirtualDisk(
handle,
@@ -237,11 +226,7 @@ func AttachVhd(path string) (err error) {
}
// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags.
func OpenVirtualDisk(
vhdPath string,
virtualDiskAccessMask VirtualDiskAccessMask,
openVirtualDiskFlags VirtualDiskFlag,
) (syscall.Handle, error) {
func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) {
parameters := OpenVirtualDiskParameters{Version: 2}
handle, err := OpenVirtualDiskWithParameters(
vhdPath,
@@ -256,12 +241,7 @@ func OpenVirtualDisk(
}
// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters.
func OpenVirtualDiskWithParameters(
vhdPath string,
virtualDiskAccessMask VirtualDiskAccessMask,
openVirtualDiskFlags VirtualDiskFlag,
parameters *OpenVirtualDiskParameters,
) (syscall.Handle, error) {
func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) {
var (
handle syscall.Handle
defaultType VirtualStorageType
@@ -299,12 +279,7 @@ func OpenVirtualDiskWithParameters(
}
// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk.
func CreateVirtualDisk(
path string,
virtualDiskAccessMask VirtualDiskAccessMask,
createVirtualDiskFlags CreateVirtualDiskFlag,
parameters *CreateVirtualDiskParameters,
) (syscall.Handle, error) {
func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) {
var (
handle syscall.Handle
defaultType VirtualStorageType
@@ -348,8 +323,6 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) {
}
// CreateDiffVhd is a helper function to create a differencing virtual disk.
//
//revive:disable-next-line:var-naming VHD, not Vhd
func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error {
// Setting `ParentPath` is how to signal to create a differencing disk.
createParams := &CreateVirtualDiskParameters{


@@ -1,6 +1,4 @@
//go:build windows
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
// Code generated by 'go generate'; DO NOT EDIT.
package vhd


@@ -1,6 +1,4 @@
//go:build windows
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
// Code generated by 'go generate'; DO NOT EDIT.
package winio
@@ -49,11 +47,9 @@ var (
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW")
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW")
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
@@ -78,6 +74,7 @@ var (
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
procbind = modws2_32.NewProc("bind")
)
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
@@ -126,14 +123,6 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision
return
}
func convertStringSidToSid(str *uint16, sid **byte) (err error) {
r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
len = uint32(r0)
@@ -165,14 +154,6 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS
return
}
func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
@@ -399,25 +380,25 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
return
}
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) {
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
status = ntStatus(r0)
status = ntstatus(r0)
return
}
func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) {
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
status = ntStatus(r0)
status = ntstatus(r0)
return
}
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) {
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
status = ntStatus(r0)
status = ntstatus(r0)
return
}
func rtlNtStatusToDosError(status ntStatus) (winerr error) {
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
if r0 != 0 {
winerr = syscall.Errno(r0)
@@ -436,3 +417,11 @@ func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint
}
return
}
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socketError {
err = errnoErr(e1)
}
return
}
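These zsyscall wrappers are generated by mkwinsyscall from `//sys` directives in the package source (the generated-file header above names the tool). As an illustrative, hedged sketch — the exact directive in go-winio may differ — the `bind` stub above would come from a line like:

```go
//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
```

The `[failretval==socketError]` clause is what makes the generated wrapper treat `r1 == socketError` as the error case.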

View File

@@ -86,12 +86,6 @@ type Container interface {
// container to be terminated by some error condition (including calling
// Close).
Wait() error
// WaitChannel returns the wait channel of the container
WaitChannel() <-chan struct{}
// WaitError returns the container termination error.
// This function should only be called after the channel in WaitChannel()
// is closed. Otherwise it is not thread safe.
WaitError() error
// Modify sends a request to modify container resources
Modify(ctx context.Context, config interface{}) error
}
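For context, the `WaitChannel`/`WaitError` pair removed above lets a caller multiplex container exit with other events, which the blocking `Wait()` alone cannot do. A minimal hypothetical caller sketch (the `waiter` interface below is a local stand-in, not part of hcsshim):

```go
package example

import "context"

// waiter is a stand-in for the subset of the Container interface used here.
type waiter interface {
	WaitChannel() <-chan struct{}
	WaitError() error
}

// waitOrCancel returns when the container exits or ctx is cancelled,
// whichever happens first.
func waitOrCancel(ctx context.Context, c waiter) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-c.WaitChannel():
		// Per the interface comment, WaitError is only meaningful once
		// the channel returned by WaitChannel has been closed.
		return c.WaitError()
	}
}
```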

View File

@@ -154,7 +154,7 @@ func (e *HcsError) Error() string {
func (e *HcsError) Temporary() bool {
err, ok := e.Err.(net.Error)
return ok && err.Temporary() //nolint:staticcheck
return ok && err.Temporary()
}
func (e *HcsError) Timeout() bool {
@@ -193,7 +193,7 @@ func (e *SystemError) Error() string {
func (e *SystemError) Temporary() bool {
err, ok := e.Err.(net.Error)
return ok && err.Temporary() //nolint:staticcheck
return ok && err.Temporary()
}
func (e *SystemError) Timeout() bool {
@@ -224,7 +224,7 @@ func (e *ProcessError) Error() string {
func (e *ProcessError) Temporary() bool {
err, ok := e.Err.(net.Error)
return ok && err.Temporary() //nolint:staticcheck
return ok && err.Temporary()
}
func (e *ProcessError) Timeout() bool {

View File

@@ -287,19 +287,11 @@ func (computeSystem *System) waitBackground() {
oc.SetSpanStatus(span, err)
}
func (computeSystem *System) WaitChannel() <-chan struct{} {
return computeSystem.waitBlock
}
func (computeSystem *System) WaitError() error {
return computeSystem.waitError
}
// Wait synchronously waits for the compute system to shutdown or terminate. If
// the compute system has already exited returns the previous error (if any).
func (computeSystem *System) Wait() error {
<-computeSystem.WaitChannel()
return computeSystem.WaitError()
<-computeSystem.waitBlock
return computeSystem.waitError
}
// ExitError returns an error describing the reason the compute system terminated.

View File

@@ -57,7 +57,7 @@ func pollIOCP(ctx context.Context, iocpHandle windows.Handle) {
}).Warn("failed to parse job object message")
continue
}
if err := msq.Enqueue(notification); err == queue.ErrQueueClosed {
if err := msq.Write(notification); err == queue.ErrQueueClosed {
// Write will only return an error when the queue is closed.
// The only time a queue would ever be closed is when we call `Close` on
// the job it belongs to which also removes it from the jobMap, so something

View File

@@ -68,9 +68,6 @@ type Options struct {
// `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject.
// Defaults to false.
UseNTVariant bool
// `IOTracking` enables tracking I/O statistics on the job object. More specifically this
// calls SetInformationJobObject with the JobObjectIoAttribution class.
EnableIOTracking bool
}
// Create creates a job object.
@@ -137,12 +134,6 @@ func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {
job.mq = mq
}
if options.EnableIOTracking {
if err := enableIOTracking(jobHandle); err != nil {
return nil, err
}
}
return job, nil
}
@@ -244,7 +235,7 @@ func (job *JobObject) PollNotification() (interface{}, error) {
if job.mq == nil {
return nil, ErrNotRegistered
}
return job.mq.Dequeue()
return job.mq.ReadOrWait()
}
// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to
@@ -339,7 +330,7 @@ func (job *JobObject) Pids() ([]uint32, error) {
err := winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectBasicProcessIdList,
unsafe.Pointer(&info),
uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
nil,
)
@@ -365,7 +356,7 @@ func (job *JobObject) Pids() ([]uint32, error) {
if err = winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectBasicProcessIdList,
unsafe.Pointer(&buf[0]),
uintptr(unsafe.Pointer(&buf[0])),
uint32(len(buf)),
nil,
); err != nil {
@@ -393,7 +384,7 @@ func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMA
if err := winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectMemoryUsageInformation,
unsafe.Pointer(&info),
uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {
@@ -415,7 +406,7 @@ func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_
if err := winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectBasicAccountingInformation,
unsafe.Pointer(&info),
uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {
@@ -424,9 +415,7 @@ func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_
return &info, nil
}
// QueryStorageStats gets the storage (I/O) stats for the job object. This call will error
// if either `EnableIOTracking` wasn't set to true on creation of the job, or SetIOTracking()
// hasn't been called since creation of the job.
// QueryStorageStats gets the storage (I/O) stats for the job object.
func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
@@ -441,7 +430,7 @@ func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFO
if err := winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectIoAttribution,
unsafe.Pointer(&info),
uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {
@@ -487,7 +476,7 @@ func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
status := winapi.NtQueryInformationProcess(
h,
winapi.ProcessVmCounters,
unsafe.Pointer(&vmCounters),
uintptr(unsafe.Pointer(&vmCounters)),
uint32(unsafe.Sizeof(vmCounters)),
nil,
)
@@ -508,31 +497,3 @@ func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
return jobWorkingSetSize, nil
}
// SetIOTracking enables IO tracking for processes in the job object.
// This enables use of the QueryStorageStats method.
func (job *JobObject) SetIOTracking() error {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return ErrAlreadyClosed
}
return enableIOTracking(job.handle)
}
func enableIOTracking(job windows.Handle) error {
info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
}
if _, err := windows.SetInformationJobObject(
job,
winapi.JobObjectIoAttribution,
uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
); err != nil {
return fmt.Errorf("failed to enable IO tracking on job object: %w", err)
}
return nil
}
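For orientation, the I/O-tracking surface removed above (the `EnableIOTracking` option, `SetIOTracking`, and the `QueryStorageStats` precondition) is used roughly as follows. This is an illustrative sketch only: the `jobobject` package is internal to hcsshim, and field access on the returned stats is omitted to avoid assuming its layout.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/jobobject"
)

// reportJobIO creates a job object with I/O attribution enabled and reads
// its storage stats; without EnableIOTracking (or a later SetIOTracking
// call), QueryStorageStats would return an error.
func reportJobIO(ctx context.Context) error {
	job, err := jobobject.Create(ctx, &jobobject.Options{EnableIOTracking: true})
	if err != nil {
		return err
	}
	defer job.Close()

	stats, err := job.QueryStorageStats()
	if err != nil {
		return err
	}
	fmt.Printf("job I/O attribution: %+v\n", stats)
	return nil
}
```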

View File

@@ -202,7 +202,7 @@ func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMI
if err := winapi.QueryInformationJobObject(
job.handle,
windows.JobObjectExtendedLimitInformation,
unsafe.Pointer(&info),
uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {
@@ -224,7 +224,7 @@ func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE
if err := winapi.QueryInformationJobObject(
job.handle,
windows.JobObjectCpuRateControlInformation,
unsafe.Pointer(&info),
uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {

View File

@@ -5,7 +5,10 @@ import (
"sync"
)
var ErrQueueClosed = errors.New("the queue is closed for reading and writing")
var (
ErrQueueClosed = errors.New("the queue is closed for reading and writing")
ErrQueueEmpty = errors.New("the queue is empty")
)
// MessageQueue represents a threadsafe message queue to be used to retrieve or
// write messages to.
@@ -26,8 +29,8 @@ func NewMessageQueue() *MessageQueue {
}
}
// Enqueue writes `msg` to the queue.
func (mq *MessageQueue) Enqueue(msg interface{}) error {
// Write writes `msg` to the queue.
func (mq *MessageQueue) Write(msg interface{}) error {
mq.m.Lock()
defer mq.m.Unlock()
@@ -40,37 +43,55 @@ func (mq *MessageQueue) Enqueue(msg interface{}) error {
return nil
}
// Dequeue will read a value from the queue and remove it. If the queue
// is empty, this will block until the queue is closed or a value gets enqueued.
func (mq *MessageQueue) Dequeue() (interface{}, error) {
// Read will read a value from the queue if available, otherwise return an error.
func (mq *MessageQueue) Read() (interface{}, error) {
mq.m.Lock()
defer mq.m.Unlock()
for !mq.closed && mq.size() == 0 {
mq.c.Wait()
}
// We got woken up, check if it's because the queue got closed.
if mq.closed {
return nil, ErrQueueClosed
}
if mq.isEmpty() {
return nil, ErrQueueEmpty
}
val := mq.messages[0]
mq.messages[0] = nil
mq.messages = mq.messages[1:]
return val, nil
}
// Size returns the size of the queue.
func (mq *MessageQueue) Size() int {
mq.m.RLock()
defer mq.m.RUnlock()
return mq.size()
// ReadOrWait will read a value from the queue if available, else it will wait for a
// value to become available. This will block forever if nothing gets written or until
// the queue gets closed.
func (mq *MessageQueue) ReadOrWait() (interface{}, error) {
mq.m.Lock()
if mq.closed {
mq.m.Unlock()
return nil, ErrQueueClosed
}
if mq.isEmpty() {
for !mq.closed && mq.isEmpty() {
mq.c.Wait()
}
mq.m.Unlock()
return mq.Read()
}
val := mq.messages[0]
mq.messages[0] = nil
mq.messages = mq.messages[1:]
mq.m.Unlock()
return val, nil
}
// Nonexported size check to check if the queue is empty inside already locked functions.
func (mq *MessageQueue) size() int {
return len(mq.messages)
// IsEmpty returns if the queue is empty
func (mq *MessageQueue) IsEmpty() bool {
mq.m.RLock()
defer mq.m.RUnlock()
return len(mq.messages) == 0
}
// Nonexported empty check that doesn't lock so we can call this in Read and Write.
func (mq *MessageQueue) isEmpty() bool {
return len(mq.messages) == 0
}
// Close closes the queue for future writes or reads. Any attempts to read or write from the
@@ -78,15 +99,13 @@ func (mq *MessageQueue) size() int {
func (mq *MessageQueue) Close() {
mq.m.Lock()
defer mq.m.Unlock()
// Already closed, noop
// Already closed
if mq.closed {
return
}
mq.messages = nil
mq.closed = true
// If there's anybody currently waiting on a value from Dequeue, we need to
// If there's anybody currently waiting on a value from ReadOrWait, we need to
// broadcast so the read(s) can return ErrQueueClosed.
mq.c.Broadcast()
}
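A minimal producer/consumer sketch of the queue API in the form this diff reverts to (`Write`/`ReadOrWait`), mirroring how `pollIOCP` and `PollNotification` use it above. Illustrative only: the `queue` package is internal to hcsshim.

```go
package example

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/queue"
)

func demo() {
	mq := queue.NewMessageQueue()

	go func() {
		// Write only fails once Close has been called.
		_ = mq.Write("notification")
	}()

	// ReadOrWait blocks until a value is written or the queue is closed.
	msg, err := mq.ReadOrWait()
	if err == queue.ErrQueueClosed {
		return
	}
	fmt.Println(msg)

	mq.Close() // wakes any remaining ReadOrWait callers with ErrQueueClosed
}
```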

View File

@@ -175,7 +175,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
// LPDWORD lpReturnLength
// );
//
//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
// HANDLE OpenJobObjectW(
// DWORD dwDesiredAccess,

View File

@@ -18,7 +18,7 @@ const ProcessVmCounters = 3
// [out, optional] PULONG ReturnLength
// );
//
//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess
//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo uintptr, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess
// typedef struct _VM_COUNTERS_EX
// {

View File

@@ -12,8 +12,7 @@ const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004
// ULONG SystemInformationLength,
// PULONG ReturnLength
// );
//
//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
type SYSTEM_PROCESS_INFORMATION struct {
NextEntryOffset uint32 // ULONG

Some files were not shown because too many files have changed in this diff.