Compare commits
85 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | bf40000e72 |  |
|  | fb99d85b76 |  |
|  | 85476bf093 |  |
|  | 819c227bf2 |  |
|  | 4b23819189 |  |
|  | b893112a90 |  |
|  | 9fa477e303 |  |
|  | b7e3320fe4 |  |
|  | 58025ee1be |  |
|  | 7a3bc6efd4 |  |
|  | de0fb93f3d |  |
|  | 4419612150 |  |
|  | 5ececfad2c |  |
|  | 4f376bbb5e |  |
|  | d03123204d |  |
|  | 0df1c44b12 |  |
|  | 75fbb8483e |  |
|  | 52e2737460 |  |
|  | c83cd3fba9 |  |
|  | d41ac23a03 |  |
|  | dbebeb7235 |  |
|  | 9e129fd653 |  |
|  | 0a44c7f162 |  |
|  | b12735358a |  |
|  | 318beaa720 |  |
|  | f7dc659e52 |  |
|  | 35afa1c1f4 |  |
|  | c71b655cfc |  |
|  | ec9db747d9 |  |
|  | 3e8ded8646 |  |
|  | 966f32b2ac |  |
|  | cde99f8517 |  |
|  | 01db066498 |  |
|  | 9653e2ba9a |  |
|  | 4d87007327 |  |
|  | dbea38b440 |  |
|  | 0bc120edda |  |
|  | 297bfa6b30 |  |
|  | 58c078fc88 |  |
|  | 79663fe1a0 |  |
|  | 9a4e0e8a28 |  |
|  | 515386e1a7 |  |
|  | 49bf6fc095 |  |
|  | d63314d737 |  |
|  | b186786563 |  |
|  | 3cc0218280 |  |
|  | b794edef6a |  |
|  | 5cc3c510c5 |  |
|  | 5aec4fe722 |  |
|  | 1513b82eed |  |
|  | 7d5e57f7ff |  |
|  | 8ecefa978c |  |
|  | a673ac7ae6 |  |
|  | 99e512e3f2 |  |
|  | 166d4db597 |  |
|  | c04748f3fb |  |
|  | 63e314ea22 |  |
|  | 0d6bf94eb6 |  |
|  | f88cddfb4d |  |
|  | 0814bc19bd |  |
|  | 422ad51afb |  |
|  | a5a3a7be11 |  |
|  | a4b830a9fc |  |
|  | 68ccdd77fe |  |
|  | 6124673bbc |  |
|  | cac2dd4dd8 |  |
|  | 70b57afda6 |  |
|  | f6c2a1e24e |  |
|  | 480befa88f |  |
|  | a3fef4879e |  |
|  | 330cfc923c |  |
|  | 0fc0551edd |  |
|  | 296a752555 |  |
|  | 3fbfb56001 |  |
|  | 50a6a566ca |  |
|  | aca2c96602 |  |
|  | 57a0f38db6 |  |
|  | ff39bf0b80 |  |
|  | 4b38cff005 |  |
|  | 89949a1156 |  |
|  | 97ec4563b4 |  |
|  | 47665ad777 |  |
|  | e1e58584a9 |  |
|  | 62fc48433c |  |
|  | a72aaa2268 |  |
.papr.sh (5 changed lines)

@@ -15,11 +15,13 @@ dnf install -y \
findutils \
git \
glib2-devel \
gnupg \
golang \
gpgme-devel \
libassuan-devel \
libseccomp-devel \
libselinux-devel \
libselinux-utils \
make \
ostree-devel \
which

@@ -28,5 +30,4 @@ dnf install -y \
# short-commit-subject validation test, so tell git-validate.sh to only check
# up to, but not including, the merge commit.
export GITVALIDATE_TIP=$(cd $GOSRC; git log -2 --pretty='%H' | tail -n 1)
make -C $GOSRC install.tools runc all validate TAGS="seccomp"
$GOSRC/tests/test_runner.sh
make -C $GOSRC install.tools runc all validate test-unit test-integration TAGS="seccomp"

@@ -10,6 +10,6 @@ required: true

tests:
# mount yum repos to inherit injected mirrors from PAPR
- docker run --privileged -v /etc/yum.repos.d:/etc/yum.repos.d.host:ro
- docker run --net=host --privileged -v /etc/yum.repos.d:/etc/yum.repos.d.host:ro
-v $PWD:/code registry.fedoraproject.org/fedora:26 sh -c
"cp -fv /etc/yum.repos.d{.host/*.repo,} && /code/.papr.sh"

.travis.yml (23 changed lines)

@@ -1,10 +1,21 @@
language: go
go:
- 1.7
- 1.8
- tip
dist: trusty
sudo: required
go:
- 1.7
- 1.8
- 1.9.x
- tip

matrix:
# If the latest unstable development version of go fails, that's OK.
allow_failures:
- go: tip

# Don't hold on the tip tests to finish. Mark tests green if the
# stable versions pass.
fast_finish: true

services:
- docker
before_install:

@@ -13,5 +24,7 @@ before_install:
- sudo apt-get -qq install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libselinux1-dev
- sudo apt-get -qq remove libseccomp2
script:
- make install.tools install.libseccomp.sudo all runc validate TAGS="apparmor seccomp"
- make install.tools install.libseccomp.sudo all runc validate TAGS="apparmor seccomp containers_image_ostree_stub"
- go test -c -tags "apparmor seccomp `./btrfs_tag.sh` `./libdm_tag.sh` `./ostree_tag.sh` `./selinux_tag.sh`" ./cmd/buildah
- tmp=`mktemp -d`; mkdir $tmp/root $tmp/runroot; sudo PATH="$PATH" ./buildah.test -test.v -root $tmp/root -runroot $tmp/runroot -storage-driver vfs -signature-policy `pwd`/tests/policy.json
- cd tests; sudo PATH="$PATH" ./test_runner.sh

CHANGELOG.md (80 changed lines, new file)

@@ -0,0 +1,80 @@
# Changelog

## 0.5 - 2017-11-07
Add secrets patch to buildah
Add proper SELinux labeling to buildah run
Add tls-verify to bud command
Make filtering by date use the image's date
images: don't list unnamed images twice
Fix timeout issue
Add further tty verbiage to buildah run
Make inspect try an image on failure if type not specified
Add support for `buildah run --hostname`
Tons of bug fixes and code cleanup

## 0.4 - 2017-09-22
### Added
Update buildah spec file to match new version
Bump to version 0.4
Add default transport to push if not provided
Add authentication to commit and push
Remove --transport flag
Run: don't complain about missing volume locations
Add credentials to buildah from
Remove export command
Bump containers/storage and containers/image

## 0.3 - 2017-07-20
## 0.2 - 2017-07-18
### Added
Vendor in latest containers/image and containers/storage
Update image-spec and runtime-spec to v1.0.0
Add support for -- ending options parsing to buildah run
Add/Copy need to support glob syntax
Add flag to remove containers on commit
Add buildah export support
update 'buildah images' and 'buildah rmi' commands
buildah containers/image: Add JSON output option
Add 'buildah version' command
Handle "run" without an explicit command correctly
Ensure volume points get created, and with perms
Add a -a/--all option to "buildah containers"

## 0.1 - 2017-06-14
### Added
Vendor in latest container/storage container/image
Add a "push" command
Add an option to specify a Create date for images
Allow building a source image from another image
Improve buildah commit performance
Add a --volume flag to "buildah run"
Fix inspect/tag-by-truncated-image-ID
Include image-spec and runtime-spec versions
buildah mount command should list mounts when no arguments are given.
Make the output image format selectable
commit images in multiple formats
Also import configurations from V2S1 images
Add a "tag" command
Add an "inspect" command
Update reference comments for docker types origins
Improve configuration preservation in imagebuildah
Report pull/commit progress by default
Contribute buildah.spec
Remove --mount from buildah-from
Add a build-using-dockerfile command (alias: bud)
Create manpages for the buildah project
Add installation for buildah and bash completions
Rename "list"/"delete" to "containers"/"rm"
Switch `buildah list quiet` option to only list container id's
buildah delete should be able to delete multiple containers
Correctly set tags on the names of pulled images
Don't mix "config" in with "run" and "commit"
Add a "list" command, for listing active builders
Add "add" and "copy" commands
Add a "run" command, using runc
Massive refactoring
Make a note to distinguish compression of layers

## 0.0 - 2017-01-26
### Added
Initial version, needs work

CONTRIBUTING.md (142 changed lines, new file)

@@ -0,0 +1,142 @@
# Contributing to Buildah

We'd love to have you join the community! Below summarizes the processes
that we follow.

## Topics

* [Reporting Issues](#reporting-issues)
* [Submitting Pull Requests](#submitting-pull-requests)
* [Communications](#communications)
* [Becoming a Maintainer](#becoming-a-maintainer)

## Reporting Issues

Before reporting an issue, check our backlog of
[open issues](https://github.com/projectatomic/buildah/issues)
to see if someone else has already reported it. If so, feel free to add
your scenario, or additional information, to the discussion. Or simply
"subscribe" to it to be notified when it is updated.

If you find a new issue with the project we'd love to hear about it! The most
important aspect of a bug report is that it includes enough information for
us to reproduce it. So, please include as much detail as possible and try
to remove the extra stuff that doesn't really relate to the issue itself.
The easier it is for us to reproduce it, the faster it'll be fixed!

Please don't include any private/sensitive information in your issue!

## Submitting Pull Requests

No Pull Request (PR) is too small! Typos, additional comments in the code,
new testcases, bug fixes, new features, more documentation, ... it's all
welcome!

While bug fixes can first be identified via an "issue", that is not required.
It's ok to just open up a PR with the fix, but make sure you include the same
information you would have included in an issue - like how to reproduce it.

PRs for new features should include some background on what use cases the
new code is trying to address. When possible and when it makes sense, try to break-up
larger PRs into smaller ones - it's easier to review smaller
code changes. But only if those smaller ones make sense as stand-alone PRs.

Regardless of the type of PR, all PRs should include:
* well documented code changes
* additional testcases. Ideally, they should fail w/o your code change applied
* documentation changes

Squash your commits into logical pieces of work that might want to be reviewed
separate from the rest of the PRs. But, squashing down to just one commit is ok
too since in the end the entire PR will be reviewed anyway. When in doubt,
squash.

PRs that fix issues should include a reference like `Closes #XXXX` in the
commit message so that github will automatically close the referenced issue
when the PR is merged.

<!--
All PRs require at least two LGTMs (Looks Good To Me) from maintainers.
-->

### Sign your PRs

The sign-off is a line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or

(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or

(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.

(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions.)

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

## Communications

For general questions, or discussions, please use the
IRC group on `irc.freenode.net` called `cri-o`
that has been setup.

For discussions around issues/bugs and features, you can use the github
[issues](https://github.com/projectatomic/buildah/issues)
and
[PRs](https://github.com/projectatomic/buildah/pulls)
tracking system.

<!--
## Becoming a Maintainer

To become a maintainer you must first be nominated by an existing maintainer.
If a majority (>50%) of maintainers agree then the proposal is adopted and
you will be added to the list.

Removing a maintainer requires at least 75% of the remaining maintainers
approval, or if the person requests to be removed then it is automatic.
Normally, a maintainer will only be removed if they are considered to be
inactive for a long period of time or are viewed as disruptive to the community.

The current list of maintainers can be found in the
[MAINTAINERS](MAINTAINERS) file.
-->

Makefile (23 changed lines)

@@ -4,6 +4,7 @@ PREFIX := /usr/local
BINDIR := $(PREFIX)/bin
BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)"
GO := go

GIT_COMMIT := $(shell git rev-parse --short HEAD)
BUILD_INFO := $(shell date +%s)

@@ -16,10 +17,10 @@ LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} -X main.buildInfo=${BUILD_I
all: buildah imgtype docs

buildah: *.go imagebuildah/*.go cmd/buildah/*.go docker/*.go util/*.go
go build $(LDFLAGS) -o buildah $(BUILDFLAGS) ./cmd/buildah
$(GO) build $(LDFLAGS) -o buildah $(BUILDFLAGS) ./cmd/buildah

imgtype: *.go docker/*.go util/*.go tests/imgtype.go
go build $(LDFLAGS) -o imgtype $(BUILDFLAGS) ./tests/imgtype.go
$(GO) build $(LDFLAGS) -o imgtype $(BUILDFLAGS) ./tests/imgtype.go

.PHONY: clean
clean:

@@ -50,16 +51,16 @@ validate:

.PHONY: install.tools
install.tools:
go get -u $(BUILDFLAGS) github.com/cpuguy83/go-md2man
go get -u $(BUILDFLAGS) github.com/vbatts/git-validation
go get -u $(BUILDFLAGS) gopkg.in/alecthomas/gometalinter.v1
$(GO) get -u $(BUILDFLAGS) github.com/cpuguy83/go-md2man
$(GO) get -u $(BUILDFLAGS) github.com/vbatts/git-validation
$(GO) get -u $(BUILDFLAGS) gopkg.in/alecthomas/gometalinter.v1
gometalinter.v1 -i

.PHONY: runc
runc: gopath
rm -rf ../../opencontainers/runc
git clone https://github.com/opencontainers/runc ../../opencontainers/runc
cd ../../opencontainers/runc && git checkout $(RUNC_COMMIT) && go build -tags "$(AUTOTAGS) $(TAGS)"
cd ../../opencontainers/runc && git checkout $(RUNC_COMMIT) && $(GO) build -tags "$(AUTOTAGS) $(TAGS)"
ln -sf ../../opencontainers/runc/runc

.PHONY: install.libseccomp.sudo

@@ -76,3 +77,13 @@ install:
.PHONY: install.completions
install.completions:
install -m 644 -D contrib/completions/bash/buildah $(DESTDIR)/${BASHINSTALLDIR}/buildah

.PHONY: test-integration
test-integration:
cd tests; ./test_runner.sh

.PHONY: test-unit
test-unit:
tmp=$(shell mktemp -d) ; \
mkdir -p $$tmp/root $$tmp/runroot; \
$(GO) test -v -tags "$(AUTOTAGS) $(TAGS)" ./cmd/buildah -args -root $$tmp/root -runroot $$tmp/runroot -storage-driver vfs -signature-policy $(shell pwd)/tests/policy.json

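The new test-unit target relies on Go's test-binary flag forwarding: everything after `-args` is handed to the compiled test binary, where a `TestMain` (see the cmd/buildah test changes later in this compare) parses it. A minimal, self-contained illustration of that pattern, separate from the repository's own code:

```go
// demo_test.go — sketch of the `go test ... -args` pattern the test-unit target uses.
// Run with: go test . -args -storage-driver vfs
package demo

import (
	"flag"
	"os"
	"testing"
)

// Custom flags are registered at package level, similar to the buildah test options.
var storageDriver = flag.String("storage-driver", "", "storage driver to test against")

func TestMain(m *testing.M) {
	flag.Parse() // everything after -args lands here
	os.Exit(m.Run())
}

func TestStorageDriverFlag(t *testing.T) {
	t.Logf("storage driver under test: %q", *storageDriver)
}
```
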
README.md (44 changed lines)

@@ -1,4 +1,6 @@
[buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool which facilitates building OCI container images


# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool which facilitates building OCI container images
================================================================

[](https://goreportcard.com/report/github.com/projectatomic/buildah)

@@ -6,7 +8,7 @@

Note: this package is in alpha, but is close to being feature-complete.

The buildah package provides a command line tool which can be used to
The Buildah package provides a command line tool which can be used to
* create a working container, either from scratch or using an image as a starting point
* create an image, either from a working container or via the instructions in a Dockerfile
* images can be built in either the OCI image format or the traditional upstream docker image format

@@ -15,9 +17,11 @@ The buildah package provides a command line tool which can be used to
* use the updated contents of a container's root filesystem as a filesystem layer to create a new image
* delete a working container or an image

**[Changelog](CHANGELOG.md)**

**Installation notes**

Prior to installing buildah, install the following packages on your linux distro:
Prior to installing Buildah, install the following packages on your linux distro:
* make
* golang (Requires version 1.8.1 or higher.)
* bats

@@ -30,7 +34,7 @@ Prior to installing buildah, install the following packages on your linux distro
* glib2-devel
* libassuan-devel
* ostree-devel
* runc
* runc (Requires version 1.0 RC4 or higher.)
* skopeo-containers

In Fedora, you can use this command:

@@ -53,7 +57,7 @@ In Fedora, you can use this command:
skopeo-containers
```

Then to install buildah on Fedora follow the steps in this example:
Then to install Buildah on Fedora follow the steps in this example:


```

@@ -67,6 +71,30 @@ Then to install buildah on Fedora follow the steps in this example:
buildah --help
```

In RHEL 7, ensure that you are subscribed to `rhel-7-server-rpms`,
`rhel-7-server-extras-rpms`, and `rhel-7-server-optional-rpms`, then
run this command:

```
yum -y install \
make \
golang \
bats \
btrfs-progs-devel \
device-mapper-devel \
glib2-devel \
gpgme-devel \
libassuan-devel \
ostree-devel \
git \
bzip2 \
go-md2man \
runc \
skopeo-containers
```

The build steps for Buildah on RHEL are the same as Fedora, above.

In Ubuntu zesty and xenial, you can use this command:

```

@@ -78,7 +106,7 @@ In Ubuntu zesty and xenial, you can use this command:
apt-get -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libostree-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
apt-get -y install golang-1.8
```
Then to install buildah on Ubuntu follow the steps in this example:
Then to install Buildah on Ubuntu follow the steps in this example:

```
mkdir ~/buildah

@@ -90,9 +118,9 @@ Then to install buildah on Ubuntu follow the steps in this example:
make install
buildah --help
```
buildah uses `runc` to run commands when `buildah run` is used, or when `buildah build-using-dockerfile`
Buildah uses `runc` to run commands when `buildah run` is used, or when `buildah build-using-dockerfile`
encounters a `RUN` instruction, so you'll also need to build and install a compatible version of
[runc](https://github.com/opencontainers/runc) for buildah to call for those cases.
[runc](https://github.com/opencontainers/runc) for Buildah to call for those cases.

## Commands
| Command | Description |

add.go (11 changed lines)

@@ -11,10 +11,9 @@ import (
"syscall"
"time"

"github.com/Sirupsen/logrus"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

// addURL copies the contents of the source URL to the destination. This is

@@ -60,7 +59,7 @@ func addURL(destination, srcurl string) error {
// filesystem, optionally extracting contents of local files that look like
// non-empty archives.
func (b *Builder) Add(destination string, extract bool, source ...string) error {
mountPoint, err := b.Mount("")
mountPoint, err := b.Mount(b.MountLabel)
if err != nil {
return err
}

@@ -144,7 +143,7 @@ func (b *Builder) Add(destination string, extract bool, source ...string) error
return errors.Wrapf(err, "error ensuring directory %q exists", d)
}
logrus.Debugf("copying %q to %q", gsrc+string(os.PathSeparator)+"*", d+string(os.PathSeparator)+"*")
if err := chrootarchive.CopyWithTar(gsrc, d); err != nil {
if err := copyWithTar(gsrc, d); err != nil {
return errors.Wrapf(err, "error copying %q to %q", gsrc, d)
}
continue

@@ -159,14 +158,14 @@ func (b *Builder) Add(destination string, extract bool, source ...string) error
}
// Copy the file, preserving attributes.
logrus.Debugf("copying %q to %q", gsrc, d)
if err := chrootarchive.CopyFileWithTar(gsrc, d); err != nil {
if err := copyFileWithTar(gsrc, d); err != nil {
return errors.Wrapf(err, "error copying %q to %q", gsrc, d)
}
continue
}
// We're extracting an archive into the destination directory.
logrus.Debugf("extracting contents of %q into %q", gsrc, dest)
if err := chrootarchive.UntarPath(gsrc, dest); err != nil {
if err := untarPath(gsrc, dest); err != nil {
return errors.Wrapf(err, "error extracting %q into %q", gsrc, dest)
}
}

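For context, the `Add` signature in the hunk above takes a destination inside the working container, an `extract` toggle for archive sources, and one or more sources; with this change `Mount` is called with the builder's own `MountLabel`, so callers only deal with `Add`. A minimal sketch of driving that API (the image name and paths are illustrative, not taken from the PR):

```go
// Sketch: create a working container and copy a local directory into it with Builder.Add.
package main

import (
	"log"

	"github.com/containers/storage"
	"github.com/projectatomic/buildah"
)

func main() {
	// Re-exec hook used by the storage/archive helpers, as in the test changes in this compare.
	if buildah.InitReexec() {
		return
	}
	store, err := storage.GetStore(storage.DefaultStoreOptions)
	if err != nil {
		log.Fatal(err)
	}
	b, err := buildah.NewBuilder(store, buildah.BuilderOptions{FromImage: "busybox:latest"})
	if err != nil {
		log.Fatal(err)
	}
	defer b.Delete()
	// extract=false copies ./site as-is; extract=true would unpack archive sources instead.
	if err := b.Add("/srv/www", false, "./site"); err != nil {
		log.Fatal(err)
	}
}
```
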
buildah.go (10 changed lines)

@@ -20,7 +20,7 @@ const (
// identify working containers.
Package = "buildah"
// Version for the Package
Version = "0.4"
Version = "0.8"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to

@@ -77,6 +77,10 @@ type Builder struct {
// MountPoint is the last location where the container's root
// filesystem was mounted. It should not be modified.
MountPoint string `json:"mountpoint,omitempty"`
// ProcessLabel is the SELinux process label associated with the container
ProcessLabel string `json:"process-label,omitempty"`
// MountLabel is the SELinux mount label associated with the container
MountLabel string `json:"mount-label,omitempty"`

// ImageAnnotations is a set of key-value pairs which is stored in the
// image's manifest.

@@ -87,6 +91,8 @@ type Builder struct {
// Image metadata and runtime settings, in multiple formats.
OCIv1 v1.Image `json:"ociv1,omitempty"`
Docker docker.V2Image `json:"docker,omitempty"`
// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"`
}

// BuilderOptions are used to initialize a new Builder.

@@ -126,6 +132,8 @@ type BuilderOptions struct {
// github.com/containers/image/types SystemContext to hold credentials
// and other authentication/authorization information.
SystemContext *types.SystemContext
// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
DefaultMountsFilePath string
}

// ImportOptions are used to initialize a Builder from an existing container

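The hunks above add SELinux `ProcessLabel`/`MountLabel` fields and a `DefaultMountsFilePath` to both `Builder` and `BuilderOptions`, each carrying a JSON tag so it persists in the per-container state. A quick illustrative check (field values invented) that the new fields serialize under those keys:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/projectatomic/buildah"
)

func main() {
	b := buildah.Builder{
		// Example SELinux labels and mounts-file path; real values come from the runtime.
		ProcessLabel:          "system_u:system_r:svirt_lxc_net_t:s0:c57,c113",
		MountLabel:            "system_u:object_r:svirt_sandbox_file_t:s0:c57,c113",
		DefaultMountsFilePath: "/usr/share/containers/mounts.conf",
	}
	out, _ := json.Marshal(&b)
	// Expect "process-label", "mount-label" and "defaultMountsFilePath" keys in the output.
	fmt.Println(string(out))
}
```
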
@@ -5,17 +5,29 @@ import (
"path/filepath"
"strings"

"github.com/Sirupsen/logrus"
"github.com/pkg/errors"
"github.com/projectatomic/buildah/imagebuildah"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)

var (
budFlags = []cli.Flag{
cli.BoolFlag{
Name: "quiet, q",
Usage: "refrain from announcing build instructions and image read/write progress",
cli.StringFlag{
Name: "authfile",
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
},
cli.StringSliceFlag{
Name: "build-arg",
Usage: "`argument=value` to supply to the builder",
},
cli.StringSliceFlag{
Name: "file, f",
Usage: "`pathname or URL` of a Dockerfile",
},
cli.StringFlag{
Name: "format",
Usage: "`format` of the built image's manifest and metadata",
},
cli.BoolTFlag{
Name: "pull",

@@ -25,13 +37,9 @@ var (
Name: "pull-always",
Usage: "pull the image, even if a version is present",
},
cli.StringFlag{
Name: "signature-policy",
Usage: "`pathname` of signature policy file (not usually used)",
},
cli.StringSliceFlag{
Name: "build-arg",
Usage: "`argument=value` to supply to the builder",
cli.BoolFlag{
Name: "quiet, q",
Usage: "refrain from announcing build instructions and image read/write progress",
},
cli.StringFlag{
Name: "runtime",

@@ -43,18 +51,19 @@ var (
Usage: "add global flags for the container runtime",
},
cli.StringFlag{
Name: "format",
Usage: "`format` of the built image's manifest and metadata",
Name: "signature-policy",
Usage: "`pathname` of signature policy file (not usually used)",
},
cli.StringSliceFlag{
Name: "tag, t",
Usage: "`tag` to apply to the built image",
},
cli.StringSliceFlag{
Name: "file, f",
Usage: "`pathname or URL` of a Dockerfile",
cli.BoolTFlag{
Name: "tls-verify",
Usage: "require HTTPS and verify certificates when accessing the registry",
},
}

budDescription = "Builds an OCI image using instructions in one or more Dockerfiles."
budCommand = cli.Command{
Name: "build-using-dockerfile",

@@ -77,35 +86,14 @@ func budCmd(c *cli.Context) error {
tags = tags[1:]
}
}
pull := true
if c.IsSet("pull") {
pull = c.BoolT("pull")
}
pullAlways := false
if c.IsSet("pull-always") {
pull = c.Bool("pull-always")
}
runtimeFlags := []string{}
if c.IsSet("runtime-flag") {
runtimeFlags = c.StringSlice("runtime-flag")
}
runtime := ""
if c.IsSet("runtime") {
runtime = c.String("runtime")
}

pullPolicy := imagebuildah.PullNever
if pull {
if c.BoolT("pull") {
pullPolicy = imagebuildah.PullIfMissing
}
if pullAlways {
if c.Bool("pull-always") {
pullPolicy = imagebuildah.PullAlways
}

signaturePolicy := ""
if c.IsSet("signature-policy") {
signaturePolicy = c.String("signature-policy")
}
args := make(map[string]string)
if c.IsSet("build-arg") {
for _, arg := range c.StringSlice("build-arg") {

@@ -117,14 +105,8 @@ func budCmd(c *cli.Context) error {
}
}
}
quiet := false
if c.IsSet("quiet") {
quiet = c.Bool("quiet")
}
dockerfiles := []string{}
if c.IsSet("file") || c.IsSet("f") {
dockerfiles = c.StringSlice("file")
}

dockerfiles := c.StringSlice("file")
format := "oci"
if c.IsSet("format") {
format = strings.ToLower(c.String("format"))

@@ -190,6 +172,9 @@ func budCmd(c *cli.Context) error {
if len(dockerfiles) == 0 {
dockerfiles = append(dockerfiles, filepath.Join(contextDir, "Dockerfile"))
}
if err := validateFlags(c, budFlags); err != nil {
return err
}

store, err := getStore(c)
if err != nil {

@@ -200,16 +185,18 @@ func budCmd(c *cli.Context) error {
ContextDirectory: contextDir,
PullPolicy: pullPolicy,
Compression: imagebuildah.Gzip,
Quiet: quiet,
SignaturePolicyPath: signaturePolicy,
Quiet: c.Bool("quiet"),
SignaturePolicyPath: c.String("signature-policy"),
SkipTLSVerify: !c.Bool("tls-verify"),
Args: args,
Output: output,
AdditionalTags: tags,
Runtime: runtime,
RuntimeArgs: runtimeFlags,
Runtime: c.String("runtime"),
RuntimeArgs: c.StringSlice("runtime-flag"),
OutputFormat: format,
AuthFilePath: c.String("authfile"),
}
if !quiet {
if !c.Bool("quiet") {
options.ReportWriter = os.Stderr
}

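Worth noting in the superseded lines above: the `--pull-always` value was being stored into `pull` (`pull = c.Bool("pull-always")`), so the dedicated `pullAlways` variable never changed; querying the context directly removes that class of bug. A small sketch of the resulting policy selection as a truth table (the policy names are the `imagebuildah` constants referenced in the hunk, represented here as strings):

```go
package main

import "fmt"

// policy mirrors the simplified logic in budCmd: --pull (default true) selects
// PullIfMissing, and --pull-always overrides it with PullAlways.
func policy(pull, pullAlways bool) string {
	p := "PullNever"
	if pull {
		p = "PullIfMissing"
	}
	if pullAlways {
		p = "PullAlways"
	}
	return p
}

func main() {
	for _, pull := range []bool{true, false} {
		for _, always := range []bool{true, false} {
			fmt.Printf("--pull=%v --pull-always=%v => %s\n", pull, always, policy(pull, always))
		}
	}
}
```
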
@@ -15,10 +15,6 @@ import (

var (
commitFlags = []cli.Flag{
cli.BoolFlag{
Name: "disable-compression, D",
Usage: "don't compress layers",
},
cli.StringFlag{
Name: "cert-dir",
Value: "",

@@ -29,31 +25,36 @@ var (
Value: "",
Usage: "use `username[:password]` for accessing the registry",
},
cli.BoolTFlag{
Name: "tls-verify",
Usage: "Require HTTPS and verify certificates when accessing the registry",
},
cli.StringFlag{
Name: "signature-policy",
Usage: "`pathname` of signature policy file (not usually used)",
cli.BoolFlag{
Name: "disable-compression, D",
Usage: "don't compress layers",
},
cli.StringFlag{
Name: "format, f",
Usage: "`format` of the image manifest and metadata",
Value: "oci",
},
cli.BoolFlag{
Name: "quiet, q",
Usage: "don't output progress information when writing images",
},
cli.StringFlag{
Name: "reference-time",
Usage: "set the timestamp on the image to match the named `file`",
Hidden: true,
},
cli.BoolFlag{
Name: "quiet, q",
Usage: "don't output progress information when writing images",
},
cli.BoolFlag{
Name: "rm",
Usage: "remove the container and its content after committing it to an image. Default leaves the container and its content in place.",
},
cli.StringFlag{
Name: "signature-policy",
Usage: "`pathname` of signature policy file (not usually used)",
},
cli.BoolTFlag{
Name: "tls-verify",
Usage: "Require HTTPS and verify certificates when accessing the registry",
},
}
commitDescription = "Writes a new image using the container's read-write layer and, if it is based\n on an image, the layers of that image"
commitCommand = cli.Command{

@@ -80,22 +81,13 @@ func commitCmd(c *cli.Context) error {
return errors.Errorf("too many arguments specified")
}
image := args[0]
if err := validateFlags(c, commitFlags); err != nil {
return err
}

signaturePolicy := ""
if c.IsSet("signature-policy") {
signaturePolicy = c.String("signature-policy")
}
compress := archive.Uncompressed
if !c.IsSet("disable-compression") || !c.Bool("disable-compression") {
compress = archive.Gzip
}
quiet := false
if c.IsSet("quiet") {
quiet = c.Bool("quiet")
}
format := "oci"
if c.IsSet("format") {
format = c.String("format")
compress := archive.Gzip
if c.Bool("disable-compression") {
compress = archive.Uncompressed
}
timestamp := time.Now().UTC()
if c.IsSet("reference-time") {

@@ -106,6 +98,8 @@ func commitCmd(c *cli.Context) error {
}
timestamp = finfo.ModTime().UTC()
}

format := c.String("format")
if strings.HasPrefix(strings.ToLower(format), "oci") {
format = buildah.OCIv1ImageManifest
} else if strings.HasPrefix(strings.ToLower(format), "docker") {

@@ -140,11 +134,11 @@ func commitCmd(c *cli.Context) error {
options := buildah.CommitOptions{
PreferredManifestType: format,
Compression: compress,
SignaturePolicyPath: signaturePolicy,
SignaturePolicyPath: c.String("signature-policy"),
HistoryTimestamp: &timestamp,
SystemContext: systemContext,
}
if !quiet {
if !c.Bool("quiet") {
options.ReportWriter = os.Stderr
}
err = builder.Commit(dest, options)

@@ -2,6 +2,8 @@ package main

import (
"os"
"reflect"
"regexp"
"strings"
"syscall"
"time"

@@ -131,6 +133,9 @@ func systemContextFromOptions(c *cli.Context) (*types.SystemContext, error) {
if c.IsSet("signature-policy") {
ctx.SignaturePolicyPath = c.String("signature-policy")
}
if c.IsSet("authfile") {
ctx.AuthFilePath = c.String("authfile")
}
return ctx, nil
}

@@ -158,3 +163,39 @@ func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {
Password: password,
}, nil
}

// validateFlags searches for StringFlags or StringSlice flags that never had
// a value set. This commonly occurs when the CLI mistakenly takes the next
// option and uses it as a value.
func validateFlags(c *cli.Context, flags []cli.Flag) error {
re, err := regexp.Compile("^-.+")
if err != nil {
return errors.Wrap(err, "compiling regex failed")
}

for _, flag := range flags {
switch reflect.TypeOf(flag).String() {
case "cli.StringSliceFlag":
{
f := flag.(cli.StringSliceFlag)
name := strings.Split(f.Name, ",")
val := c.StringSlice(name[0])
for _, v := range val {
if ok := re.MatchString(v); ok {
return errors.Errorf("option --%s requires a value", name[0])
}
}
}
case "cli.StringFlag":
{
f := flag.(cli.StringFlag)
name := strings.Split(f.Name, ",")
val := c.String(name[0])
if ok := re.MatchString(val); ok {
return errors.Errorf("option --%s requires a value", name[0])
}
}
}
}
return nil
}

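The new `validateFlags` helper exists because urfave/cli (through the standard flag parser) will consume the next token as a flag's value even when that token is itself an option, e.g. `buildah bud --tag --file Dockerfile .` stores `--file` as the tag. A standalone sketch of that failure mode and the same regex check (the tiny app and flag subset are illustrative, not buildah's actual wiring):

```go
package main

import (
	"fmt"
	"os"
	"regexp"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "bud-demo"
	app.Flags = []cli.Flag{
		cli.StringSliceFlag{Name: "tag, t"},
		cli.StringSliceFlag{Name: "file, f"},
	}
	app.Action = func(c *cli.Context) error {
		optionLike := regexp.MustCompile("^-.+") // same pattern validateFlags compiles
		for _, v := range c.StringSlice("tag") {
			if optionLike.MatchString(v) {
				return fmt.Errorf("option --tag requires a value")
			}
		}
		fmt.Println("tags:", c.StringSlice("tag"))
		return nil
	}
	// "--file" ends up as the value of --tag, which the check above rejects.
	if err := app.Run([]string{"bud-demo", "--tag", "--file", "Dockerfile", "."}); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```
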
@@ -1,25 +1,59 @@
package main

import (
"flag"
"os"
"os/user"
"testing"

"flag"

is "github.com/containers/image/storage"
"github.com/containers/storage"
"github.com/projectatomic/buildah"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)

var (
signaturePolicyPath = ""
storeOptions = storage.DefaultStoreOptions
)

func TestMain(m *testing.M) {
flag.StringVar(&signaturePolicyPath, "signature-policy", "", "pathname of signature policy file (not usually used)")
options := storage.StoreOptions{}
debug := false
flag.StringVar(&options.GraphRoot, "root", "", "storage root dir")
flag.StringVar(&options.RunRoot, "runroot", "", "storage state dir")
flag.StringVar(&options.GraphDriverName, "storage-driver", "", "storage driver")
flag.BoolVar(&debug, "debug", false, "turn on debug logging")
flag.Parse()
if options.GraphRoot != "" || options.RunRoot != "" || options.GraphDriverName != "" {
storeOptions = options
}
if buildah.InitReexec() {
return
}
logrus.SetLevel(logrus.ErrorLevel)
if debug {
logrus.SetLevel(logrus.DebugLevel)
}
os.Exit(m.Run())
}

func TestGetStore(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)

set := flag.NewFlagSet("test", 0)
globalSet := flag.NewFlagSet("test", 0)
globalSet.String("root", "", "path to the root directory in which data, including images, is stored")
globalSet.String("root", "", "path to the directory in which data, including images, is stored")
globalSet.String("runroot", "", "path to the directory in which state is stored")
globalSet.String("storage-driver", "", "storage driver")
globalCtx := cli.NewContext(nil, globalSet, nil)
command := cli.Command{Name: "imagesCommand"}
globalCtx.GlobalSet("root", storeOptions.GraphRoot)
globalCtx.GlobalSet("runroot", storeOptions.RunRoot)
globalCtx.GlobalSet("storage-driver", storeOptions.GraphDriverName)
command := cli.Command{Name: "TestGetStore"}
c := cli.NewContext(nil, set, globalCtx)
c.Command = command

@@ -33,13 +67,19 @@ func TestGetSize(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)

store, err := storage.GetStore(storage.DefaultStoreOptions)
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
} else if store != nil {
is.Transport.SetStore(store)
}

// Pull an image so that we know we have at least one
_, err = pullTestImage(t, "busybox:latest")
if err != nil {
t.Fatalf("could not pull image to remove: %v", err)
}

images, err := store.Images()
if err != nil {
t.Fatalf("Error reading images: %v", err)

@@ -60,16 +100,24 @@ func failTestIfNotRoot(t *testing.T) {
}
}

func pullTestImage(imageName string) error {
set := flag.NewFlagSet("test", 0)
set.Bool("pull", true, "pull the image if not present")
globalSet := flag.NewFlagSet("globaltest", 0)
globalCtx := cli.NewContext(nil, globalSet, nil)
command := cli.Command{Name: "imagesCommand"}
c := cli.NewContext(nil, set, globalCtx)
c.Command = command
c.Set("pull", "true")
c.Args = append(c.Args, imageName)
func pullTestImage(t *testing.T, imageName string) (string, error) {
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
}
options := buildah.BuilderOptions{
FromImage: imageName,
SignaturePolicyPath: signaturePolicyPath,
}

return fromCommand(c)
b, err := buildah.NewBuilder(store, options)
if err != nil {
t.Fatal(err)
}
id := b.FromImageID
err = b.Delete()
if err != nil {
t.Fatal(err)
}
return id, nil
}

@@ -3,10 +3,10 @@ package main
import (
"strings"

"github.com/Sirupsen/logrus"
"github.com/mattn/go-shellwords"
"github.com/pkg/errors"
"github.com/projectatomic/buildah"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)

@@ -18,58 +18,58 @@ const (

var (
configFlags = []cli.Flag{
cli.StringFlag{
Name: "author",
Usage: "image author contact `information`",
},
cli.StringFlag{
Name: "created-by",
Usage: "`description` of how the image was created",
Value: DefaultCreatedBy,
cli.StringSliceFlag{
Name: "annotation, a",
Usage: "add `annotation` e.g. annotation=value, for the target image",
},
cli.StringFlag{
Name: "arch",
Usage: "`architecture` of the target image",
Usage: "set `architecture` of the target image",
},
cli.StringFlag{
Name: "os",
Usage: "`operating system` of the target image",
},
cli.StringFlag{
Name: "user, u",
Usage: "`user` to run containers based on image as",
},
cli.StringSliceFlag{
Name: "port, p",
Usage: "`port` to expose when running containers based on image",
},
cli.StringSliceFlag{
Name: "env, e",
Usage: "`environment variable` to set when running containers based on image",
},
cli.StringFlag{
Name: "entrypoint",
Usage: "`entry point` for containers based on image",
Name: "author",
Usage: "set image author contact `information`",
},
cli.StringFlag{
Name: "cmd",
Usage: "`command` for containers based on image",
},
cli.StringSliceFlag{
Name: "volume, v",
Usage: "`volume` to create for containers based on image",
Usage: "sets the default `command` to run for containers based on the image",
},
cli.StringFlag{
Name: "workingdir",
Usage: "working `directory` for containers based on image",
Name: "created-by",
Usage: "add `description` of how the image was created",
Value: DefaultCreatedBy,
},
cli.StringFlag{
Name: "entrypoint",
Usage: "set `entry point` for containers based on image",
},
cli.StringSliceFlag{
Name: "env, e",
Usage: "add `environment variable` to be set when running containers based on image",
},
cli.StringSliceFlag{
Name: "label, l",
Usage: "image configuration `label` e.g. label=value",
Usage: "add image configuration `label` e.g. label=value",
},
cli.StringFlag{
Name: "os",
Usage: "set `operating system` of the target image",
},
cli.StringSliceFlag{
Name: "annotation, a",
Usage: "`annotation` e.g. annotation=value, for the target image",
Name: "port, p",
Usage: "add `port` to expose when running containers based on image",
},
cli.StringFlag{
Name: "user, u",
Usage: "set default `user` to run inside containers based on image",
},
cli.StringSliceFlag{
Name: "volume, v",
Usage: "add default `volume` path to be created for containers based on image",
},
cli.StringFlag{
Name: "workingdir",
Usage: "set working `directory` for containers based on image",
},
}
configDescription = "Modifies the configuration values which will be saved to the image"

@@ -171,6 +171,9 @@ func configCmd(c *cli.Context) error {
return errors.Errorf("too many arguments specified")
}
name := args[0]
if err := validateFlags(c, configFlags); err != nil {
return err
}

store, err := getStore(c)
if err != nil {

@@ -20,8 +20,12 @@ type jsonContainer {
var (
containersFlags = []cli.Flag{
cli.BoolFlag{
Name: "quiet, q",
Usage: "display only container IDs",
Name: "all, a",
Usage: "also list non-buildah containers",
},
cli.BoolFlag{
Name: "json",
Usage: "output in JSON format",
},
cli.BoolFlag{
Name: "noheading, n",

@@ -32,12 +36,8 @@ var (
Usage: "do not truncate output",
},
cli.BoolFlag{
Name: "all, a",
Usage: "also list non-buildah containers",
},
cli.BoolFlag{
Name: "json",
Usage: "output in JSON format",
Name: "quiet, q",
Usage: "display only container IDs",
},
}
containersDescription = "Lists containers which appear to be " + buildah.Package + " working containers, their\n names and IDs, and the names and IDs of the images from which they were\n initialized"

@@ -52,39 +52,26 @@ var (
)

func containersCmd(c *cli.Context) error {
if err := validateFlags(c, containersFlags); err != nil {
return err
}
store, err := getStore(c)
if err != nil {
return err
}

quiet := false
if c.IsSet("quiet") {
quiet = c.Bool("quiet")
}
noheading := false
if c.IsSet("noheading") {
noheading = c.Bool("noheading")
}
truncate := true
if c.IsSet("notruncate") {
truncate = !c.Bool("notruncate")
}
all := false
if c.IsSet("all") {
all = c.Bool("all")
}
jsonOut := false
quiet := c.Bool("quiet")
truncate := !c.Bool("notruncate")
JSONContainers := []jsonContainer{}
if c.IsSet("json") {
jsonOut = c.Bool("json")
}
jsonOut := c.Bool("json")

list := func(n int, containerID, imageID, image, container string, isBuilder bool) {
if jsonOut {
JSONContainers = append(JSONContainers, jsonContainer{ID: containerID, Builder: isBuilder, ImageID: imageID, ImageName: image, ContainerName: container})
return
}
if n == 0 && !noheading && !quiet {

if n == 0 && !c.Bool("noheading") && !quiet {
if truncate {
fmt.Printf("%-12s %-8s %-12s %-32s %s\n", "CONTAINER ID", "BUILDER", "IMAGE ID", "IMAGE NAME", "CONTAINER NAME")
} else {

@@ -125,7 +112,7 @@ func containersCmd(c *cli.Context) error {
if err != nil {
return errors.Wrapf(err, "error reading build containers")
}
if !all {
if !c.Bool("all") {
for i, builder := range builders {
image := imageNameForID(builder.FromImageID)
list(i, builder.ContainerID, builder.FromImageID, image, builder.Container, true)

@@ -12,16 +12,8 @@ import (
var (
fromFlags = []cli.Flag{
cli.StringFlag{
Name: "name",
Usage: "`name` for the working container",
},
cli.BoolTFlag{
Name: "pull",
Usage: "pull the image if not present",
},
cli.BoolFlag{
Name: "pull-always",
Usage: "pull the image even if one with the same name is already present",
Name: "authfile",
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
},
cli.StringFlag{
Name: "cert-dir",

@@ -33,17 +25,29 @@ var (
Value: "",
Usage: "use `username[:password]` for accessing the registry",
},
cli.StringFlag{
Name: "name",
Usage: "`name` for the working container",
},
cli.BoolTFlag{
Name: "tls-verify",
Usage: "Require HTTPS and verify certificates when accessing the registry",
Name: "pull",
Usage: "pull the image if not present",
},
cli.BoolFlag{
Name: "pull-always",
Usage: "pull the image even if named image is present in store (supersedes pull option)",
},
cli.BoolFlag{
Name: "quiet, q",
Usage: "don't output progress information when pulling images",
},
cli.StringFlag{
Name: "signature-policy",
Usage: "`pathname` of signature policy file (not usually used)",
},
cli.BoolFlag{
Name: "quiet, q",
Usage: "don't output progress information when pulling images",
cli.BoolTFlag{
Name: "tls-verify",
Usage: "require HTTPS and verify certificates when accessing the registry",
},
}
fromDescription = "Creates a new working container, either from scratch or using a specified\n image as a starting point"

@@ -67,44 +71,24 @@ func fromCmd(c *cli.Context) error {
if len(args) > 1 {
return errors.Errorf("too many arguments specified")
}

image := args[0]
if err := validateFlags(c, fromFlags); err != nil {
return err
}

systemContext, err := systemContextFromOptions(c)
if err != nil {
return errors.Wrapf(err, "error building system context")
}

pull := true
if c.IsSet("pull") {
pull = c.BoolT("pull")
}
pullAlways := false
if c.IsSet("pull-always") {
pull = c.Bool("pull-always")
}

pullPolicy := buildah.PullNever
if pull {
if c.BoolT("pull") {
pullPolicy = buildah.PullIfMissing
}
if pullAlways {
if c.Bool("pull-always") {
pullPolicy = buildah.PullAlways
}

name := ""
if c.IsSet("name") {
name = c.String("name")
}
signaturePolicy := ""
if c.IsSet("signature-policy") {
signaturePolicy = c.String("signature-policy")
}

quiet := false
if c.IsSet("quiet") {
quiet = c.Bool("quiet")
}
signaturePolicy := c.String("signature-policy")

store, err := getStore(c)
if err != nil {

@@ -112,13 +96,14 @@ func fromCmd(c *cli.Context) error {
}

options := buildah.BuilderOptions{
FromImage: image,
Container: name,
PullPolicy: pullPolicy,
SignaturePolicyPath: signaturePolicy,
SystemContext: systemContext,
FromImage: args[0],
Container: c.String("name"),
PullPolicy: pullPolicy,
SignaturePolicyPath: signaturePolicy,
SystemContext: systemContext,
DefaultMountsFilePath: c.GlobalString("default-mounts-file"),
}
if !quiet {
if !c.Bool("quiet") {
options.ReportWriter = os.Stderr
}

@@ -9,10 +9,10 @@ import (

"encoding/json"

"github.com/Sirupsen/logrus"
is "github.com/containers/image/storage"
"github.com/containers/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)

@@ -42,8 +42,20 @@ type filterParams {
var (
imagesFlags = []cli.Flag{
cli.BoolFlag{
Name: "quiet, q",
Usage: "display only image IDs",
Name: "digests",
Usage: "show digests",
},
cli.StringFlag{
Name: "filter, f",
Usage: "filter output based on conditions provided",
},
cli.StringFlag{
Name: "format",
Usage: "pretty-print images using a Go template. will override --quiet",
},
cli.BoolFlag{
Name: "json",
Usage: "output in JSON format",
},
cli.BoolFlag{
Name: "noheading, n",

@@ -54,20 +66,8 @@ var (
Usage: "do not truncate output",
},
cli.BoolFlag{
Name: "json",
Usage: "output in JSON format",
},
cli.BoolFlag{
Name: "digests",
Usage: "show digests",
},
cli.StringFlag{
Name: "format",
Usage: "pretty-print images using a Go template. will override --quiet",
},
cli.StringFlag{
Name: "filter, f",
Usage: "filter output based on conditions provided (default [])",
Name: "quiet, q",
Usage: "display only image IDs",
},
}

@@ -83,6 +83,9 @@ var (
)

func imagesCmd(c *cli.Context) error {
if err := validateFlags(c, imagesFlags); err != nil {
return err
}
store, err := getStore(c)
if err != nil {
return err

@@ -93,28 +96,10 @@ func imagesCmd(c *cli.Context) error {
return errors.Wrapf(err, "error reading images")
}

quiet := false
if c.IsSet("quiet") {
quiet = c.Bool("quiet")
}
noheading := false
if c.IsSet("noheading") {
noheading = c.Bool("noheading")
}
truncate := true
if c.IsSet("no-trunc") {
truncate = !c.Bool("no-trunc")
}
digests := false
if c.IsSet("digests") {
digests = c.Bool("digests")
}
formatString := ""
hasTemplate := false
if c.IsSet("format") {
formatString = c.String("format")
hasTemplate = true
}
quiet := c.Bool("quiet")
truncate := !c.Bool("no-trunc")
digests := c.Bool("digests")
hasTemplate := c.IsSet("format")

name := ""
if len(c.Args()) == 1 {

@@ -136,7 +121,7 @@ func imagesCmd(c *cli.Context) error {
}
var params *filterParams
if c.IsSet("filter") {
params, err = parseFilter(images, c.String("filter"))
params, err = parseFilter(store, images, c.String("filter"))
if err != nil {
return errors.Wrapf(err, "error parsing filter")
}

@@ -144,14 +129,14 @@ func imagesCmd(c *cli.Context) error {
params = nil
}

if len(images) > 0 && !noheading && !quiet && !hasTemplate {
if len(images) > 0 && !c.Bool("noheading") && !quiet && !hasTemplate {
outputHeader(truncate, digests)
}

return outputImages(images, formatString, store, params, name, hasTemplate, truncate, digests, quiet)
return outputImages(images, c.String("format"), store, params, name, hasTemplate, truncate, digests, quiet)
}

func parseFilter(images []storage.Image, filter string) (*filterParams, error) {
func parseFilter(store storage.Store, images []storage.Image, filter string) (*filterParams, error) {
params := new(filterParams)
filterStrings := strings.Split(filter, ",")
for _, param := range filterStrings {

@@ -166,17 +151,19 @@ func parseFilter(images []storage.Image, filter string) (*filterParams, error) {
case "label":
params.label = pair[1]
case "before":
beforeDate, err := setFilterDate(images, pair[1])
beforeDate, err := setFilterDate(store, images, pair[1])
if err != nil {
return nil, fmt.Errorf("no such id: %s", pair[0])
}
params.beforeDate = beforeDate
params.beforeImage = pair[1]
case "since":
sinceDate, err := setFilterDate(images, pair[1])
sinceDate, err := setFilterDate(store, images, pair[1])
if err != nil {
return nil, fmt.Errorf("no such id: %s", pair[0])
}
params.sinceDate = sinceDate
params.sinceImage = pair[1]
case "reference":
params.referencePattern = pair[1]
default:

@@ -186,12 +173,25 @@ func parseFilter(images []storage.Image, filter string) (*filterParams, error) {
return params, nil
}

func setFilterDate(images []storage.Image, imgName string) (time.Time, error) {
func setFilterDate(store storage.Store, images []storage.Image, imgName string) (time.Time, error) {
for _, image := range images {
for _, name := range image.Names {
if matchesReference(name, imgName) {
// Set the date to this image
date := image.Created
ref, err := is.Transport.ParseStoreReference(store, "@"+image.ID)
if err != nil {
return time.Time{}, fmt.Errorf("error parsing reference to image %q: %v", image.ID, err)
}
img, err := ref.NewImage(nil)
if err != nil {
return time.Time{}, fmt.Errorf("error reading image %q: %v", image.ID, err)
}
defer img.Close()
inspect, err := img.Inspect()
if err != nil {
return time.Time{}, fmt.Errorf("error inspecting image %q: %v", image.ID, err)
}
date := inspect.Created
return date, nil
}
}

@@ -207,7 +207,7 @@ func outputHeader(truncate, digests bool) {
}

if digests {
fmt.Printf("%-64s ", "DIGEST")
fmt.Printf("%-71s ", "DIGEST")
}

fmt.Printf("%-22s %s\n", "CREATED AT", "SIZE")

@@ -225,7 +225,7 @@ func outputImages(images []storage.Image, format string, store storage.Store, fi
}
}

names := []string{""}
names := []string{}
if len(image.Names) > 0 {
names = image.Names
} else {

@@ -62,7 +62,7 @@ func TestFormatStringOutput(t *testing.T) {
|
||||
output := captureOutput(func() {
|
||||
outputUsingFormatString(true, true, params)
|
||||
})
|
||||
expectedOutput := fmt.Sprintf("%-12.12s %-40s %-64s %-22s %s\n", params.ID, params.Name, params.Digest, params.CreatedAt, params.Size)
|
||||
expectedOutput := fmt.Sprintf("%-20.12s %-56s %-64s %-22s %s\n", params.ID, params.Name, params.Digest, params.CreatedAt, params.Size)
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Error outputting using format string:\n\texpected: %s\n\treceived: %s\n", expectedOutput, output)
|
||||
}
|
||||
@@ -89,7 +89,7 @@ func TestOutputHeader(t *testing.T) {
|
||||
output := captureOutput(func() {
|
||||
outputHeader(true, false)
|
||||
})
|
||||
expectedOutput := fmt.Sprintf("%-12s %-40s %-22s %s\n", "IMAGE ID", "IMAGE NAME", "CREATED AT", "SIZE")
|
||||
expectedOutput := fmt.Sprintf("%-20s %-56s %-22s %s\n", "IMAGE ID", "IMAGE NAME", "CREATED AT", "SIZE")
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Error outputting header:\n\texpected: %s\n\treceived: %s\n", expectedOutput, output)
|
||||
}
|
||||
@@ -97,7 +97,7 @@ func TestOutputHeader(t *testing.T) {
|
||||
output = captureOutput(func() {
|
||||
outputHeader(true, true)
|
||||
})
|
||||
expectedOutput = fmt.Sprintf("%-12s %-40s %-64s %-22s %s\n", "IMAGE ID", "IMAGE NAME", "DIGEST", "CREATED AT", "SIZE")
|
||||
expectedOutput = fmt.Sprintf("%-20s %-56s %-71s %-22s %s\n", "IMAGE ID", "IMAGE NAME", "DIGEST", "CREATED AT", "SIZE")
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Error outputting header:\n\texpected: %s\n\treceived: %s\n", expectedOutput, output)
|
||||
}
|
||||
@@ -105,7 +105,7 @@ func TestOutputHeader(t *testing.T) {
|
||||
output = captureOutput(func() {
|
||||
outputHeader(false, false)
|
||||
})
|
||||
expectedOutput = fmt.Sprintf("%-64s %-40s %-22s %s\n", "IMAGE ID", "IMAGE NAME", "CREATED AT", "SIZE")
|
||||
expectedOutput = fmt.Sprintf("%-64s %-56s %-22s %s\n", "IMAGE ID", "IMAGE NAME", "CREATED AT", "SIZE")
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Error outputting header:\n\texpected: %s\n\treceived: %s\n", expectedOutput, output)
|
||||
}
|
||||
@@ -163,7 +163,7 @@ func TestOutputImagesQuietTruncated(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
@@ -175,6 +175,12 @@ func TestOutputImagesQuietTruncated(t *testing.T) {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
// Tests quiet and truncated output
|
||||
output, err := captureOutputWithError(func() error {
|
||||
return outputImages(images[:1], "", store, nil, "", false, true, false, true)
|
||||
@@ -191,13 +197,19 @@ func TestOutputImagesQuietNotTruncated(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
@@ -219,13 +231,19 @@ func TestOutputImagesFormatString(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
@@ -247,13 +265,19 @@ func TestOutputImagesFormatTemplate(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
@@ -275,13 +299,19 @@ func TestOutputImagesArgNoMatch(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
@@ -305,13 +335,23 @@ func TestOutputMultipleImages(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
// Pull two images so that we know we have at least two
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
_, err = pullTestImage(t, "alpine:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
@@ -333,7 +373,7 @@ func TestParseFilterAllParams(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
@@ -344,18 +384,40 @@ func TestParseFilterAllParams(t *testing.T) {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
err = pullTestImage("busybox:latest")
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
label := "dangling=true,label=a=b,before=busybox:latest,since=busybox:latest,reference=abcdef"
|
||||
params, err := parseFilter(images, label)
|
||||
params, err := parseFilter(store, images, label)
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing filter")
|
||||
t.Fatalf("error parsing filter: %v", err)
|
||||
}
|
||||
|
||||
expectedParams := &filterParams{dangling: "true", label: "a=b", beforeImage: "busybox:latest", sinceImage: "busybox:latest", referencePattern: "abcdef"}
|
||||
ref, err := is.Transport.ParseStoreReference(store, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing store reference: %v", err)
|
||||
}
|
||||
img, err := ref.NewImage(nil)
|
||||
if err != nil {
|
||||
t.Fatalf("error reading image from store: %v", err)
|
||||
}
|
||||
defer img.Close()
|
||||
inspect, err := img.Inspect()
|
||||
if err != nil {
|
||||
t.Fatalf("error inspecting image in store: %v", err)
|
||||
}
|
||||
|
||||
expectedParams := &filterParams{
|
||||
dangling: "true",
|
||||
label: "a=b",
|
||||
beforeImage: "busybox:latest",
|
||||
beforeDate: inspect.Created,
|
||||
sinceImage: "busybox:latest",
|
||||
sinceDate: inspect.Created,
|
||||
referencePattern: "abcdef",
|
||||
}
|
||||
if *params != *expectedParams {
|
||||
t.Errorf("filter did not return expected result\n\tExpected: %v\n\tReceived: %v", expectedParams, params)
|
||||
}
|
||||
@@ -365,7 +427,7 @@ func TestParseFilterInvalidDangling(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
@@ -376,13 +438,13 @@ func TestParseFilterInvalidDangling(t *testing.T) {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
err = pullTestImage("busybox:latest")
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
label := "dangling=NO,label=a=b,before=busybox:latest,since=busybox:latest,reference=abcdef"
|
||||
_, err = parseFilter(images, label)
|
||||
_, err = parseFilter(store, images, label)
|
||||
if err == nil || err.Error() != "invalid filter: 'dangling=[NO]'" {
|
||||
t.Fatalf("expected error parsing filter")
|
||||
}
|
||||
@@ -392,7 +454,7 @@ func TestParseFilterInvalidBefore(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
@@ -403,13 +465,13 @@ func TestParseFilterInvalidBefore(t *testing.T) {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
err = pullTestImage("busybox:latest")
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
label := "dangling=false,label=a=b,before=:,since=busybox:latest,reference=abcdef"
|
||||
_, err = parseFilter(images, label)
|
||||
_, err = parseFilter(store, images, label)
|
||||
if err == nil || !strings.Contains(err.Error(), "no such id") {
|
||||
t.Fatalf("expected error parsing filter")
|
||||
}
|
||||
@@ -419,7 +481,7 @@ func TestParseFilterInvalidSince(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
@@ -430,13 +492,13 @@ func TestParseFilterInvalidSince(t *testing.T) {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
err = pullTestImage("busybox:latest")
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
label := "dangling=false,label=a=b,before=busybox:latest,since=:,reference=abcdef"
|
||||
_, err = parseFilter(images, label)
|
||||
_, err = parseFilter(store, images, label)
|
||||
if err == nil || !strings.Contains(err.Error(), "no such id") {
|
||||
t.Fatalf("expected error parsing filter")
|
||||
}
|
||||
@@ -446,7 +508,7 @@ func TestParseFilterInvalidFilter(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
@@ -457,13 +519,13 @@ func TestParseFilterInvalidFilter(t *testing.T) {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
err = pullTestImage("busybox:latest")
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
label := "foo=bar"
|
||||
_, err = parseFilter(images, label)
|
||||
_, err = parseFilter(store, images, label)
|
||||
if err == nil || err.Error() != "invalid filter: 'foo'" {
|
||||
t.Fatalf("expected error parsing filter")
|
||||
}
|
||||
@@ -501,12 +563,19 @@ func TestMatchesBeforeImageTrue(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
@@ -525,12 +594,17 @@ func TestMatchesBeforeImageFalse(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
@@ -550,12 +624,17 @@ func TestMatchesSinceeImageTrue(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
@@ -574,12 +653,17 @@ func TestMatchesSinceImageFalse(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storage.DefaultStoreOptions)
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
|
||||
@@ -21,14 +21,15 @@ ID: {{.ContainerID}}

var (
inspectFlags = []cli.Flag{
cli.StringFlag{
Name: "type, t",
Usage: "look at the item of the specified `type` (container or image) and name",
},
cli.StringFlag{
Name: "format, f",
Usage: "use `format` as a Go template to format the output",
},
cli.StringFlag{
Name: "type, t",
Usage: "look at the item of the specified `type` (container or image) and name",
Value: inspectTypeContainer,
},
}
inspectDescription = "Inspects a build container's or built image's configuration."
inspectCommand = cli.Command{
@@ -51,23 +52,13 @@ func inspectCmd(c *cli.Context) error {
if len(args) > 1 {
return errors.Errorf("too many arguments specified")
}

itemType := inspectTypeContainer
if c.IsSet("type") {
itemType = c.String("type")
}
switch itemType {
case inspectTypeContainer:
case inspectTypeImage:
default:
return errors.Errorf("the only recognized types are %q and %q", inspectTypeContainer, inspectTypeImage)
if err := validateFlags(c, inspectFlags); err != nil {
return err
}

format := defaultFormat
if c.IsSet("format") {
if c.String("format") != "" {
format = c.String("format")
}
if c.String("format") != "" {
format = c.String("format")
}
t := template.Must(template.New("format").Parse(format))

@@ -78,17 +69,25 @@ func inspectCmd(c *cli.Context) error {
return err
}

switch itemType {
switch c.String("type") {
case inspectTypeContainer:
builder, err = openBuilder(store, name)
if err != nil {
return errors.Wrapf(err, "error reading build container %q", name)
if c.IsSet("type") {
return errors.Wrapf(err, "error reading build container %q", name)
}
builder, err = openImage(store, name)
if err != nil {
return errors.Wrapf(err, "error reading build object %q", name)
}
}
case inspectTypeImage:
builder, err = openImage(store, name)
if err != nil {
return errors.Wrapf(err, "error reading image %q", name)
}
default:
return errors.Errorf("the only recognized types are %q and %q", inspectTypeContainer, inspectTypeImage)
}

if c.IsSet("format") {

@@ -4,15 +4,17 @@ import (
"fmt"
"os"

"github.com/Sirupsen/logrus"
"github.com/containers/storage"
ispecs "github.com/opencontainers/image-spec/specs-go"
rspecs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/projectatomic/buildah"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)

func main() {
debug := false

var defaultStoreDriverOptions *cli.StringSlice
if buildah.InitReexec() {
return
@@ -27,6 +29,10 @@ func main() {
defaultStoreDriverOptions = &optionSlice
}
app.Flags = []cli.Flag{
cli.BoolFlag{
Name: "debug",
Usage: "print debugging information",
},
cli.StringFlag{
Name: "root",
Usage: "storage root dir",
@@ -47,17 +53,17 @@ func main() {
Usage: "storage driver option",
Value: defaultStoreDriverOptions,
},
cli.BoolFlag{
Name: "debug",
Usage: "print debugging information",
cli.StringFlag{
Name: "default-mounts-file",
Usage: "path to default mounts file",
Value: buildah.DefaultMountsFile,
},
}
app.Before = func(c *cli.Context) error {
logrus.SetLevel(logrus.ErrorLevel)
if c.GlobalIsSet("debug") {
if c.GlobalBool("debug") {
logrus.SetLevel(logrus.DebugLevel)
}
if c.GlobalBool("debug") {
debug = true
logrus.SetLevel(logrus.DebugLevel)
}
return nil
}
@@ -92,7 +98,11 @@ func main() {
}
err := app.Run(os.Args)
if err != nil {
logrus.Errorf("%v", err)
os.Exit(1)
if debug {
logrus.Errorf(err.Error())
} else {
fmt.Fprintln(os.Stderr, err.Error())
}
cli.OsExiter(1)
}
}

@@ -30,15 +30,15 @@ func mountCmd(c *cli.Context) error {
if len(args) > 1 {
return errors.Errorf("too many arguments specified")
}
if err := validateFlags(c, mountFlags); err != nil {
return err
}

store, err := getStore(c)
if err != nil {
return err
}
truncate := true
if c.IsSet("notruncate") {
truncate = !c.Bool("notruncate")
}
truncate := !c.Bool("notruncate")

if len(args) == 1 {
name := args[0]
@@ -46,7 +46,7 @@ func mountCmd(c *cli.Context) error {
if err != nil {
return errors.Wrapf(err, "error reading build container %q", name)
}
mountPoint, err := builder.Mount("")
mountPoint, err := builder.Mount(builder.MountLabel)
if err != nil {
return errors.Wrapf(err, "error mounting %q container %q", name, builder.Container)
}

@@ -5,9 +5,11 @@ import (
"os"
"strings"

"github.com/containers/image/manifest"
"github.com/containers/image/transports"
"github.com/containers/image/transports/alltransports"
"github.com/containers/storage/pkg/archive"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/projectatomic/buildah"
"github.com/urfave/cli"
@@ -15,9 +17,9 @@ import (

var (
pushFlags = []cli.Flag{
cli.BoolFlag{
Name: "disable-compression, D",
Usage: "don't compress layers",
cli.StringFlag{
Name: "authfile",
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
},
cli.StringFlag{
Name: "cert-dir",
@@ -29,17 +31,25 @@ var (
Value: "",
Usage: "use `username[:password]` for accessing the registry",
},
cli.BoolTFlag{
Name: "tls-verify",
Usage: "Require HTTPS and verify certificates when accessing the registry",
cli.BoolFlag{
Name: "disable-compression, D",
Usage: "don't compress layers",
},
cli.StringFlag{
Name: "format, f",
Usage: "manifest type (oci, v2s1, or v2s2) to use when saving image using the 'dir:' transport (default is manifest type of source)",
},
cli.BoolFlag{
Name: "quiet, q",
Usage: "don't output progress information when pushing images",
},
cli.StringFlag{
Name: "signature-policy",
Usage: "`pathname` of signature policy file (not usually used)",
},
cli.BoolFlag{
Name: "quiet, q",
Usage: "don't output progress information when pushing images",
cli.BoolTFlag{
Name: "tls-verify",
Usage: "require HTTPS and verify certificates when accessing the registry",
},
}
pushDescription = fmt.Sprintf(`
@@ -68,20 +78,15 @@ func pushCmd(c *cli.Context) error {
if len(args) < 2 {
return errors.New("source and destination image IDs must be specified")
}
if err := validateFlags(c, pushFlags); err != nil {
return err
}
src := args[0]
destSpec := args[1]

signaturePolicy := ""
if c.IsSet("signature-policy") {
signaturePolicy = c.String("signature-policy")
}
compress := archive.Uncompressed
if !c.IsSet("disable-compression") || !c.Bool("disable-compression") {
compress = archive.Gzip
}
quiet := false
if c.IsSet("quiet") {
quiet = c.Bool("quiet")
compress := archive.Gzip
if c.Bool("disable-compression") {
compress = archive.Uncompressed
}

store, err := getStore(c)
@@ -109,13 +114,28 @@ func pushCmd(c *cli.Context) error {
return errors.Wrapf(err, "error building system context")
}

var manifestType string
if c.IsSet("format") {
switch c.String("format") {
case "oci":
manifestType = imgspecv1.MediaTypeImageManifest
case "v2s1":
manifestType = manifest.DockerV2Schema1SignedMediaType
case "v2s2", "docker":
manifestType = manifest.DockerV2Schema2MediaType
default:
return fmt.Errorf("unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'", c.String("format"))
}
}

options := buildah.PushOptions{
Compression: compress,
SignaturePolicyPath: signaturePolicy,
ManifestType: manifestType,
SignaturePolicyPath: c.String("signature-policy"),
Store: store,
SystemContext: systemContext,
}
if !quiet {
if !c.Bool("quiet") {
options.ReportWriter = os.Stderr
}

@@ -9,6 +9,7 @@ import (
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)

@@ -31,15 +32,15 @@ var (
)

func rmiCmd(c *cli.Context) error {
force := false
if c.IsSet("force") {
force = c.Bool("force")
}
force := c.Bool("force")

args := c.Args()
if len(args) == 0 {
return errors.Errorf("image name or ID must be specified")
}
if err := validateFlags(c, rmiFlags); err != nil {
return err
}

store, err := getStore(c)
if err != nil {
@@ -58,7 +59,10 @@ func rmiCmd(c *cli.Context) error {
}
if len(ctrIDs) > 0 && len(image.Names) <= 1 {
if force {
removeContainers(ctrIDs, store)
err = removeContainers(ctrIDs, store)
if err != nil {
return errors.Wrapf(err, "error removing containers %v for image %q", ctrIDs, id)
}
} else {
for _, ctrID := range ctrIDs {
return fmt.Errorf("Could not remove image %q (must force) - container %q is using its reference image", id, ctrID)
@@ -98,16 +102,16 @@ func getImage(id string, store storage.Store) (*storage.Image, error) {
var ref types.ImageReference
ref, err := properImageRef(id)
if err != nil {
//logrus.Debug(err)
logrus.Debug(err)
}
if ref == nil {
if ref, err = storageImageRef(store, id); err != nil {
//logrus.Debug(err)
logrus.Debug(err)
}
}
if ref == nil {
if ref, err = storageImageID(store, id); err != nil {
//logrus.Debug(err)
logrus.Debug(err)
}
}
if ref != nil {

@@ -10,7 +10,7 @@ import (

func TestProperImageRefTrue(t *testing.T) {
// Pull an image so we know we have it
err := pullTestImage("busybox:latest")
_, err := pullTestImage(t, "busybox:latest")
if err != nil {
t.Fatalf("could not pull image to remove")
}
@@ -25,7 +25,7 @@ func TestProperImageRefTrue(t *testing.T) {

func TestProperImageRefFalse(t *testing.T) {
// Pull an image so we know we have it
err := pullTestImage("busybox:latest")
_, err := pullTestImage(t, "busybox:latest")
if err != nil {
t.Fatal("could not pull image to remove")
}
@@ -40,8 +40,7 @@ func TestStorageImageRefTrue(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)

options := storage.DefaultStoreOptions
store, err := storage.GetStore(options)
store, err := storage.GetStore(storeOptions)
if store != nil {
is.Transport.SetStore(store)
}
@@ -49,7 +48,7 @@ func TestStorageImageRefTrue(t *testing.T) {
t.Fatalf("could not get store: %v", err)
}
// Pull an image so we know we have it
err = pullTestImage("busybox:latest")
_, err = pullTestImage(t, "busybox:latest")
if err != nil {
t.Fatalf("could not pull image to remove: %v", err)
}
@@ -65,8 +64,7 @@ func TestStorageImageRefFalse(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)

options := storage.DefaultStoreOptions
store, err := storage.GetStore(options)
store, err := storage.GetStore(storeOptions)
if store != nil {
is.Transport.SetStore(store)
}
@@ -74,7 +72,7 @@ func TestStorageImageRefFalse(t *testing.T) {
t.Fatalf("could not get store: %v", err)
}
// Pull an image so we know we have it
err = pullTestImage("busybox:latest")
_, err = pullTestImage(t, "busybox:latest")
if err != nil {
t.Fatalf("could not pull image to remove: %v", err)
}
@@ -88,8 +86,7 @@ func TestStorageImageIDTrue(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)

options := storage.DefaultStoreOptions
store, err := storage.GetStore(options)
store, err := storage.GetStore(storeOptions)
if store != nil {
is.Transport.SetStore(store)
}
@@ -97,7 +94,7 @@ func TestStorageImageIDTrue(t *testing.T) {
t.Fatalf("could not get store: %v", err)
}
// Pull an image so we know we have it
err = pullTestImage("busybox:latest")
_, err = pullTestImage(t, "busybox:latest")
if err != nil {
t.Fatalf("could not pull image to remove: %v", err)
}
@@ -126,8 +123,7 @@ func TestStorageImageIDFalse(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)

options := storage.DefaultStoreOptions
store, err := storage.GetStore(options)
store, err := storage.GetStore(storeOptions)
if store != nil {
is.Transport.SetStore(store)
}

@@ -6,15 +6,19 @@ import (
"strings"
"syscall"

"github.com/Sirupsen/logrus"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/projectatomic/buildah"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)

var (
runFlags = []cli.Flag{
cli.StringFlag{
Name: "hostname",
Usage: "Set the hostname inside of the container",
},
cli.StringFlag{
Name: "runtime",
Usage: "`path` to an alternate runtime",
@@ -24,14 +28,14 @@ var (
Name: "runtime-flag",
Usage: "add global flags for the container runtime",
},
cli.StringSliceFlag{
Name: "volume, v",
Usage: "bind mount a host location into the container while running the command",
},
cli.BoolFlag{
Name: "tty",
Usage: "allocate a pseudo-TTY in the container",
},
cli.StringSliceFlag{
Name: "volume, v",
Usage: "bind mount a host location into the container while running the command",
},
}
runDescription = "Runs a specified command using the container's root filesystem as a root\n filesystem, using configuration settings inherited from the container's\n image or as specified using previous calls to the config command"
runCommand = cli.Command{
@@ -50,24 +54,15 @@ func runCmd(c *cli.Context) error {
return errors.Errorf("container ID must be specified")
}
name := args[0]
if err := validateFlags(c, runFlags); err != nil {
return err
}

args = args.Tail()
if len(args) > 0 && args[0] == "--" {
args = args[1:]
}

runtime := ""
if c.IsSet("runtime") {
runtime = c.String("runtime")
}
flags := []string{}
if c.IsSet("runtime-flag") {
flags = c.StringSlice("runtime-flag")
}
volumes := []string{}
if c.IsSet("v") || c.IsSet("volume") {
volumes = c.StringSlice("volume")
}

store, err := getStore(c)
if err != nil {
return err
@@ -78,14 +73,10 @@ func runCmd(c *cli.Context) error {
return errors.Wrapf(err, "error reading build container %q", name)
}

hostname := ""
if c.IsSet("hostname") {
hostname = c.String("hostname")
}
options := buildah.RunOptions{
Hostname: hostname,
Runtime: runtime,
Args: flags,
Hostname: c.String("hostname"),
Runtime: c.String("runtime"),
Args: c.StringSlice("runtime-flag"),
}

if c.IsSet("tty") {
@@ -96,7 +87,7 @@ func runCmd(c *cli.Context) error {
}
}

for _, volumeSpec := range volumes {
for _, volumeSpec := range c.StringSlice("volume") {
volSpec := strings.Split(volumeSpec, ":")
if len(volSpec) >= 2 {
mountOptions := "bind"

13
commit.go
@@ -2,10 +2,10 @@ package buildah

import (
"bytes"
"fmt"
"io"
"time"

"github.com/Sirupsen/logrus"
cp "github.com/containers/image/copy"
"github.com/containers/image/signature"
is "github.com/containers/image/storage"
@@ -16,6 +16,7 @@ import (
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/projectatomic/buildah/util"
"github.com/sirupsen/logrus"
)

var (
@@ -79,6 +80,9 @@ type PushOptions struct {
// github.com/containers/image/types SystemContext to hold credentials
// and other authentication/authorization information.
SystemContext *types.SystemContext
// ManifestType is the format to use when saving the imge using the 'dir' transport
// possible options are oci, v2s1, and v2s2
ManifestType string
}

// shallowCopy copies the most recent layer, the configuration, and the manifest from one image to another.
@@ -255,7 +259,7 @@ func (b *Builder) Commit(dest types.ImageReference, options CommitOptions) error
}
if exporting {
// Copy everything.
err = cp.Image(policyContext, dest, src, getCopyOptions(options.ReportWriter, nil, options.SystemContext))
err = cp.Image(policyContext, dest, src, getCopyOptions(options.ReportWriter, nil, options.SystemContext, ""))
if err != nil {
return errors.Wrapf(err, "error copying layers and metadata")
}
@@ -327,9 +331,12 @@ func Push(image string, dest types.ImageReference, options PushOptions) error {
return errors.Wrapf(err, "error recomputing layer digests and building metadata")
}
// Copy everything.
err = cp.Image(policyContext, dest, src, getCopyOptions(options.ReportWriter, nil, options.SystemContext))
err = cp.Image(policyContext, dest, src, getCopyOptions(options.ReportWriter, nil, options.SystemContext, options.ManifestType))
if err != nil {
return errors.Wrapf(err, "error copying layers and metadata")
}
if options.ReportWriter != nil {
fmt.Fprintf(options.ReportWriter, "\n")
}
return nil
}

@@ -7,11 +7,12 @@ import (
"github.com/containers/image/types"
)

func getCopyOptions(reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext) *cp.Options {
func getCopyOptions(reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options {
return &cp.Options{
ReportWriter: reportWriter,
SourceCtx: sourceSystemContext,
DestinationCtx: destinationSystemContext,
ReportWriter: reportWriter,
SourceCtx: sourceSystemContext,
DestinationCtx: destinationSystemContext,
ForceManifestMIMEType: manifestType,
}
}

@@ -553,3 +553,8 @@ func (b *Builder) Domainname() string {
func (b *Builder) SetDomainname(name string) {
b.Docker.Config.Domainname = name
}

// SetDefaultMountsFilePath sets the mounts file path for testing purposes
func (b *Builder) SetDefaultMountsFilePath(path string) {
b.DefaultMountsFilePath = path
}

@@ -168,6 +168,7 @@ return 1
--runroot
--storage-driver
--storage-opt
--default-mounts-file
"

case "$prev" in
@@ -340,9 +341,11 @@ return 1
--pull-always
--quiet
-q
--tls-verify
"

local options_with_args="
--authfile
--signature-policy
--runtime
--runtime-flag
@@ -384,6 +387,7 @@ return 1
"

local options_with_args="
--hostname
--runtime
--runtime-flag
--volume
@@ -478,8 +482,11 @@ return 1
"

local options_with_args="
--authfile
--cert-dir
--creds
--format
-f
--signature-policy
"

@@ -624,6 +631,7 @@ return 1
"

local options_with_args="
--authfile
--cert-dir
--creds
--name

@@ -25,7 +25,7 @@
%global shortcommit %(c=%{commit}; echo ${c:0:7})

Name: buildah
Version: 0.3
Version: 0.7
Release: 1.git%{shortcommit}%{?dist}
Summary: A command line tool used to creating OCI Images
License: ASL 2.0
@@ -44,6 +44,7 @@ BuildRequires: libassuan-devel
BuildRequires: glib2-devel
BuildRequires: ostree-devel
Requires: runc >= 1.0.0-6
Requires: container-selinux
Requires: skopeo-containers
Provides: %{repo} = %{version}-%{release}

@@ -87,6 +88,49 @@ make DESTDIR=%{buildroot} PREFIX=%{_prefix} install install.completions
%{_datadir}/bash-completion/completions/*

%changelog
* Thu Nov 16 2017 Dan Walsh <dwalsh@redhat.com> 0.7-1
- Ignore errors when trying to read containers buildah.json for loading SELinux reservations
- Use credentials from kpod login for buildah

* Wed Nov 15 2017 Dan Walsh <dwalsh@redhat.com> 0.6-1
- Adds support for converting manifest types when using the dir transport
- Rework how we do UID resolution in images
- Bump github.com/vbatts/tar-split
- Set option.terminal appropriately in run

* Tue Nov 07 2017 Dan Walsh <dwalsh@redhat.com> 0.5-1
- Add secrets patch to buildah
- Add proper SELinux labeling to buildah run
- Add tls-verify to bud command
- Make filtering by date use the image's date
- images: don't list unnamed images twice
- Fix timeout issue
- Add further tty verbiage to buildah run
- Make inspect try an image on failure if type not specified
- Add support for `buildah run --hostname`
- Tons of bug fixes and code cleanup

* Fri Sep 22 2017 Dan Walsh <dwalsh@redhat.com> 0.4-1.git9cbccf88c
- Add default transport to push if not provided
- Avoid trying to print a nil ImageReference
- Add authentication to commit and push
- Add information on buildah from man page on transports
- Remove --transport flag
- Run: do not complain about missing volume locations
- Add credentials to buildah from
- Remove export command
- Run(): create the right working directory
- Improve "from" behavior with unnamed references
- Avoid parsing image metadata for dates and layers
- Read the image's creation date from public API
- Bump containers/storage and containers/image
- Don't panic if an image's ID can't be parsed
- Turn on --enable-gc when running gometalinter
- rmi: handle truncated image IDs

* Thu Jul 20 2017 Dan Walsh <dwalsh@redhat.com> 0.3.0-1
- Bump for inclusion of OCI 1.0 Runtime and Image Spec

* Tue Jul 18 2017 Dan Walsh <dwalsh@redhat.com> 0.2.0-1
- buildah run: Add support for -- ending options parsing
- buildah Add/Copy support for glob syntax

@@ -1,6 +1,7 @@
package buildah

import (
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)

@@ -13,5 +14,5 @@ func (b *Builder) Delete() error {
b.MountPoint = ""
b.Container = ""
b.ContainerID = ""
return nil
return label.ReleaseLabel(b.ProcessLabel)
}

@@ -14,6 +14,18 @@ to a temporary location.

## OPTIONS

**--authfile** *path*

Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `kpod login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--build-arg** *arg=value*

Specifies a build argument and its value, which will be interpolated in
instructions read from the Dockerfiles in the same way that environment
variables are, but which will not be added to environment variable list in the
resulting image's configuration.

**-f, --file** *Dockerfile*

Specifies a Dockerfile which contains instructions for building the image,
@@ -25,6 +37,12 @@ If a build context is not specified, and at least one Dockerfile is a
local file, the directory in which it resides will be used as the build
context.

**--format**

Control the format for the built image's manifest and configuration data.
Recognized formats include *oci* (OCI image-spec v1.0, the default) and
*docker* (version 2, using schema format 2 for the manifest).

**--pull**

Pull the image if it is not present. If this flag is disabled (with
@@ -35,18 +53,11 @@ Defaults to *true*.

Pull the image even if a version of the image is already present.

**--signature-policy** *signaturepolicy*
**--quiet**

Pathname of a signature policy file to use. It is not recommended that this
option be used, as the default behavior of using the system-wide default policy
(frequently */etc/containers/policy.json*) is most often preferred.

**--build-arg** *arg=value*

Specifies a build argument and its value, which will be interpolated in
instructions read from the Dockerfiles in the same way that environment
variables are, but which will not be added to environment variable list in the
resulting image's configuration.
Suppress output messages which indicate which instruction is being processed,
and of progress when pulling images from a registry, and when writing the
output image.

**--runtime** *path*

@@ -57,22 +68,20 @@ commands specified by the **RUN** instruction.

Adds global flags for the container runtime.

**--signature-policy** *signaturepolicy*

Pathname of a signature policy file to use. It is not recommended that this
option be used, as the default behavior of using the system-wide default policy
(frequently */etc/containers/policy.json*) is most often preferred.

**-t, --tag** *imageName*

Specifies the name which will be assigned to the resulting image if the build
process completes successfully.

**--format**
**--tls-verify** *bool-value*

Control the format for the built image's manifest and configuration data.
Recognized formats include *oci* (OCI image-spec v1.0, the default) and
*docker* (version 2, using schema format 2 for the manifest).

**--quiet**

Suppress output messages which indicate which instruction is being processed,
and of progress when pulling images from a registry, and when writing the
output image.
Require HTTPS and verify certificates when talking to container registries (defaults to true)

## EXAMPLE

@@ -84,7 +93,9 @@ buildah bud -f Dockerfile.simple -f Dockerfile.notsosimple

buildah bud -t imageName .

buildah bud -t imageName -f Dockerfile.simple
buildah bud --tls-verify=true -t imageName -f Dockerfile.simple

buildah bud --tls-verify=false -t imageName .

## SEE ALSO
buildah(1)
buildah(1), kpod-login(1), docker-login(1)

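A minimal sketch of how the newly documented **--authfile** and **--format** flags could be combined, assuming only the behavior described on this page; *imageName* and the auth file path are placeholders, not values taken from these commits:

buildah bud --format docker -t imageName .

buildah bud --authfile /tmp/auths/myauths.json --tls-verify=true -t imageName -f Dockerfile.simple .
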
@@ -25,6 +25,21 @@ The username[:password] to use to authenticate with the registry if required.

Don't compress filesystem layers when building the image.


**--format**

Control the format for the image manifest and configuration data. Recognized
formats include *oci* (OCI image-spec v1.0, the default) and *docker* (version
2, using schema format 2 for the manifest).

**--quiet**

When writing the output image, suppress progress output.

**--rm**
Remove the container and its content after committing it to an image.
Default leaves the container and its content in place.

**--signature-policy**

Pathname of a signature policy file to use. It is not recommended that this
@@ -35,20 +50,6 @@ option be used, as the default behavior of using the system-wide default policy

Require HTTPS and verify certificates when talking to container registries (defaults to true)

**--quiet**

When writing the output image, suppress progress output.

**--format**

Control the format for the image manifest and configuration data. Recognized
formats include *oci* (OCI image-spec v1.0, the default) and *docker* (version
2, using schema format 2 for the manifest).

**--rm**
Remove the container and its content after committing it to an image.
Default leaves the container and its content in place.

## EXAMPLE

This example saves an image based on the container.

@@ -7,11 +7,16 @@ buildah containers - List the working containers and their base images.
**buildah** **containers** [*options* [...]]

## DESCRIPTION
Lists containers which appear to be buildah working containers, their names and
Lists containers which appear to be Buildah working containers, their names and
IDs, and the names and IDs of the images from which they were initialized.

## OPTIONS

**--all, -a**

List information about all containers, including those which were not created
by and are not being used by Buildah.

**--json**

Output in JSON format.
@@ -28,11 +33,6 @@ Do not truncate IDs in output.

Displays only the container IDs.

**--all, -a**

List information about all containers, including those which were not created
by and are not being used by buildah.

## EXAMPLE

buildah containers

@@ -17,7 +17,7 @@ Multiple transports are supported:
An existing local directory _path_ retrieving the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.

**docker://**_docker-reference_ (Default)
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$HOME/.docker/config.json`, which is set e.g. using `(docker login)`.
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(kpod login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.

**docker-archive:**_path_
An image is retrieved as a `docker load` formatted file.
@@ -36,6 +36,11 @@ The container ID of the container that was created. On error, -1 is returned an

## OPTIONS

**--authfile** *path*

Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `kpod login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--cert-dir** *path*

Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry
@@ -58,6 +63,10 @@ Defaults to *true*.

Pull the image even if a version of the image is already present.

**--quiet**

If an image needs to be pulled from the registry, suppress progress output.

**--signature-policy** *signaturepolicy*

Pathname of a signature policy file to use. It is not recommended that this
@@ -68,10 +77,6 @@ option be used, as the default behavior of using the system-wide default policy

Require HTTPS and verify certificates when talking to container registries (defaults to true)

**--quiet**

If an image needs to be pulled from the registry, suppress progress output.

## EXAMPLE

buildah from imagename --pull
@@ -86,5 +91,7 @@ buildah from myregistry/myrepository/imagename:imagetag --tls-verify=false

buildah from myregistry/myrepository/imagename:imagetag --creds=myusername:mypassword --cert-dir ~/auth

buildah from myregistry/myrepository/imagename:imagetag --authfile=/tmp/auths/myauths.json

## SEE ALSO
buildah(1)
buildah(1), kpod-login(1), docker-login(1)

@@ -11,10 +11,6 @@ Displays locally stored images, their names, and their IDs.

## OPTIONS

**--json**

Display the output in JSON format.

**--digests**

Show the image digests.
@@ -28,6 +24,10 @@ keywords are 'dangling', 'label', 'before' and 'since'.

Pretty-print images using a Go template. Will override --quiet

**--json**

Display the output in JSON format.

**--noheading, -n**

Omit the table headings from the listing of images.

@@ -7,7 +7,7 @@ buildah inspect - Display information about working containers or images.
**buildah** **inspect** [*options* [...] --] **ID**

## DESCRIPTION
Prints the low-level information on buildah object(s) (e.g. container, images) identified by name or ID. By default, this will render all results in a
Prints the low-level information on Buildah object(s) (e.g. container, images) identified by name or ID. By default, this will render all results in a
JSON array. If the container and image have the same name, this will return container JSON for unspecified type. If a format is specified,
the given template will be executed for each result.

@@ -19,7 +19,7 @@ Use *template* as a Go template when formatting the output.

Users of this option should be familiar with the [*text/template*
package](https://golang.org/pkg/text/template/) in the Go standard library, and
of internals of buildah's implementation.
of internals of Buildah's implementation.

**--type** *container* | *image*

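A minimal sketch of how the **--type** and **--format** options described here could be used together, assuming the default template fields shown earlier in this comparison (e.g. `.ContainerID`); *containerName* is a placeholder:

buildah inspect --type container --format '{{.ContainerID}}' containerName
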
@@ -24,7 +24,7 @@ Image stored in local container/storage
An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.

**docker://**_docker-reference_
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$HOME/.docker/config.json`, which is set e.g. using `(docker login)`.
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(kpod login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.

**docker-archive:**_path_[**:**_docker-reference_]
An image is stored in the `docker save` formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest.
@@ -40,6 +40,11 @@ Image stored in local container/storage

## OPTIONS

**--authfile** *path*

Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `kpod login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--cert-dir** *path*

Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry
@@ -52,6 +57,14 @@ The username[:password] to use to authenticate with the registry if required.

Don't compress copies of filesystem layers which will be pushed.

**--format, -f**

Manifest Type (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)

**--quiet**

When writing the output image, suppress progress output.

**--signature-policy**

Pathname of a signature policy file to use. It is not recommended that this
@@ -62,10 +75,6 @@ option be used, as the default behavior of using the system-wide default policy

Require HTTPS and verify certificates when talking to container registries (defaults to true)

**--quiet**

When writing the output image, suppress progress output.

## EXAMPLE

This example extracts the imageID image to a local directory in docker format.

@@ -80,6 +89,10 @@ This example extracts the imageID image to a container registry named registry.e

`# buildah push imageID docker://registry.example.com/repository:tag`

This example extracts the imageID image to a private container registry named registry.example.com with authentication from /tmp/auths/myauths.json.

`# buildah push --authfile /tmp/auths/myauths.json imageID docker://registry.example.com/repository:tag`

This example extracts the imageID image and puts into the local docker container store.

`# buildah push imageID docker-daemon:image:tag`
@@ -91,4 +104,4 @@ This example extracts the imageID image and puts it into the registry on the loc
`# buildah push --cert-dir ~/auth --tls-verify=true --creds=username:password imageID docker://localhost:5000/my-imageID`

## SEE ALSO
buildah(1)
buildah(1), kpod-login(1), docker-login(1)

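A minimal sketch of the new **--format** option together with the `dir:` transport described above, assuming only the formats this page lists (oci, v2s1, v2s2); the image ID and target directory are placeholders:

buildah push --format v2s2 imageID dir:/tmp/myimage
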
||||
|
||||
## buildah-run "1" "March 2017" "buildah"
|
||||
|
||||
## NAME
|
||||
@@ -11,16 +10,13 @@ buildah run - Run a command inside of the container.
|
||||
Launches a container and runs the specified command in that container using the
|
||||
container's root filesystem as a root filesystem, using configuration settings
|
||||
inherited from the container's image or as specified using previous calls to
|
||||
the *buildah config* command.
|
||||
the *buildah config* command. If you execute *buildah run* and expect an
|
||||
interactive shell, you need to specify the --tty flag.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--tty**
|
||||
|
||||
By default a pseudo-TTY is allocated only when buildah's standard input is
|
||||
attached to a pseudo-TTY. Setting the `--tty` option to `true` will cause a
|
||||
pseudo-TTY to be allocated inside the container. Setting the `--tty` option to
|
||||
`false` will prevent the pseudo-TTY from being allocated.
|
||||
**--hostname**
|
||||
Set the hostname inside of the running container.
|
||||
|
||||
**--runtime** *path*
|
||||
|
||||
@@ -32,6 +28,14 @@ Adds global flags for the container runtime. To list the supported flags, please
|
||||
consult manpages of your selected container runtime (`runc` is the default
|
||||
runtime, the manpage to consult is `runc(8)`)
|
||||
|
||||
**--tty**
|
||||
|
||||
By default a pseudo-TTY is allocated only when buildah's standard input is
|
||||
attached to a pseudo-TTY. Setting the `--tty` option to `true` will cause a
|
||||
pseudo-TTY to be allocated inside the container connecting the user's "terminal"
|
||||
with the stdin and stdout stream of the container. Setting the `--tty` option to
|
||||
`false` will prevent the pseudo-TTY from being allocated.
|
||||
|
||||
**--volume, -v** *source*:*destination*:*flags*
|
||||
|
||||
Bind mount a location from the host into the container for its lifetime.
|
||||
@@ -43,7 +47,13 @@ options to the command inside of the container
|
||||
|
||||
buildah run containerID -- ps -auxw
|
||||
|
||||
buildah run containerID --hostname myhost -- ps -auxw
|
||||
|
||||
buildah run containerID --runtime-flag --no-new-keyring -- ps -auxw
|
||||
|
||||
buildah run --tty containerID /bin/bash
|
||||
|
||||
buildah run --tty=false containerID ls /
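An example using the **--volume** option described above (the host and container paths are only illustrations):

buildah run -v /path/on/host:/path/in/container:ro containerID -- ls /path/in/container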
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
## buildah-version "1" "June 2017" "buildah"
|
||||
## buildah-version "1" "June 2017" "Buildah"
|
||||
|
||||
## NAME
|
||||
buildah version - Display the Buildah Version Information.
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
## buildah "1" "March 2017" "buildah"
|
||||
|
||||
## NAME
|
||||
buildah - A command line tool to facilitate working with containers and using them to build images.
|
||||
Buildah - A command line tool to facilitate working with containers and using them to build images.
|
||||
|
||||
## SYNOPSIS
|
||||
buildah [OPTIONS] COMMAND [ARG...]
|
||||
|
||||
|
||||
## DESCRIPTION
|
||||
The buildah package provides a command line tool which can be used to:
|
||||
The Buildah package provides a command line tool which can be used to:
|
||||
|
||||
* Create a working container, either from scratch or using an image as a starting point.
|
||||
* Mount a working container's root filesystem for manipulation.
|
||||
@@ -18,6 +18,18 @@ The buildah package provides a command line tool which can be used to:
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--debug**
|
||||
|
||||
Print debugging information
|
||||
|
||||
**--default-mounts-file**
|
||||
|
||||
Path to the default mounts file (default: "/usr/share/containers/mounts.conf")
|
||||
|
||||
**--help, -h**
|
||||
|
||||
Show help
|
||||
|
||||
**--root** **value**
|
||||
|
||||
Storage root dir (default: "/var/lib/containers/storage")
|
||||
@@ -34,14 +46,6 @@ Storage driver
|
||||
|
||||
Storage driver option
|
||||
|
||||
**--debug**
|
||||
|
||||
Print debugging information
|
||||
|
||||
**--help, -h**
|
||||
|
||||
Show help
|
||||
|
||||
**--version, -v**
|
||||
|
||||
Print the version
|
||||
|
||||
236
docs/tutorials/01-intro.md
Normal file
@@ -0,0 +1,236 @@
|
||||
# Buildah Tutorial 1
|
||||
## Building OCI container images
|
||||
|
||||
The purpose of this tutorial is to demonstrate how Buildah can be used to build container images compliant with the [Open Container Initiative](https://www.opencontainers.org/) (OCI) [image specification](https://github.com/opencontainers/image-spec). Images can be built from existing images, from scratch, and using Dockerfiles. OCI images built using the Buildah command line tool (CLI) and the underlying OCI based technologies (e.g. [containers/image](https://github.com/containers/image) and [containers/storage](https://github.com/containers/storage)) are portable and can therefore run in a Docker environment.
|
||||
|
||||
In brief the `containers/image` project provides mechanisms to copy, push, pull, inspect and sign container images. The `containers/storage` project provides mechanisms for storing filesystem layers, container images, and containers. Buildah is a CLI that takes advantage of these underlying projects and therefore allows you to build, move, and manage container images and containers.
|
||||
|
||||
The first step is to install Buildah. Run the following as root, since Buildah commands currently require root privileges:
|
||||
|
||||
# dnf -y install buildah
|
||||
|
||||
After installing Buildah we can see there are no images installed. The `buildah images` command will list all the images:
|
||||
|
||||
# buildah images
|
||||
|
||||
We can also see that there are no containers by running:
|
||||
|
||||
# buildah containers
|
||||
|
||||
When you build a working container from an existing image, Buildah defaults to appending '-working-container' to the image's name to construct a name for the container. The Buildah CLI conveniently returns the name of the new container. You can take advantage of this by assigning the returned value to a shell variable using standard shell assignment:
|
||||
|
||||
# container=$(buildah from fedora)
|
||||
|
||||
It is not required to assign a shell variable. Running `buildah from fedora` is sufficient. It just helps simplify commands later. To see the name of the container that we stored in the shell variable:
|
||||
|
||||
# echo $container
|
||||
|
||||
What can we do with this new container? Let's try running bash:
|
||||
|
||||
# buildah run $container bash
|
||||
|
||||
Notice we get a new shell prompt because we are running a bash shell inside of the container. It should be noted that `buildah run` is primarily intended for helping debug during the build process. A runtime like runc or a container interface like [CRI-O](https://github.com/kubernetes-incubator/cri-o) is more suited for starting containers in production.
|
||||
|
||||
Be sure to `exit` out of the container and let's try running something else:
|
||||
|
||||
# buildah run $container java
|
||||
|
||||
Oops. Java is not installed. A message containing something like the following was returned.
|
||||
|
||||
container_linux.go:274: starting container process caused "exec: \"java\": executable file not found in $PATH"
|
||||
|
||||
Let's try installing it:
|
||||
|
||||
# buildah run $container -- dnf -y install java
|
||||
|
||||
The `--` syntax tells Buildah that there are no more `buildah run` options after this point; everything that follows is passed to the command running in the container's shell. It is required whenever the command we specify includes command line options that are not meant for Buildah.
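For instance, without the `--` Buildah would try to interpret the `-y` flag itself and the command would fail (the exact error text may vary with the Buildah version):

# buildah run $container dnf -y install java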
|
||||
|
||||
Now running `buildah run $container java` will show that Java has been installed. It will return the standard Java `Usage` output.
|
||||
|
||||
## Building a container from scratch
|
||||
|
||||
One of the advantages of using `buildah` to build OCI compliant container images is that you can easily build a container image from scratch and therefore exclude unnecessary packages from your image. E.g. most final container images for production probably don't need a package manager like `dnf`.
|
||||
|
||||
Let's build a container from scratch. The special "image" name "scratch" tells Buildah to create an empty container. The container has a small amount of metadata but no real Linux content.
|
||||
|
||||
# newcontainer=$(buildah from scratch)
|
||||
|
||||
You can see this new empty container by running:
|
||||
|
||||
# buildah containers
|
||||
|
||||
You should see output similar to the following:
|
||||
|
||||
CONTAINER ID BUILDER IMAGE ID IMAGE NAME CONTAINER NAME
|
||||
82af3b9a9488 * 3d85fcda5754 docker.io/library/fedora:latest fedora-working-container
|
||||
ac8fa6be0f0a * scratch working-container
|
||||
|
||||
Its container name is working-container by default, and it's stored in the `$newcontainer` variable. Notice that the image name (IMAGE NAME) is "scratch". This just indicates that there is no real image yet, i.e. the container exists in containers/storage but has no representation in containers/image. So when we run:
|
||||
|
||||
# buildah images
|
||||
|
||||
We don't see the image listed. There is no corresponding scratch image. It is an empty container.
|
||||
|
||||
So does this container actually do anything? Let's see.
|
||||
|
||||
# buildah run $newcontainer bash
|
||||
|
||||
Nope. This really is empty. The package installer `dnf` is not even inside this container. It's essentially an empty layer on top of the kernel. So what can be done with that? Thankfully there is a `buildah mount` command.
|
||||
|
||||
# scratchmnt=$(buildah mount $newcontainer)
|
||||
|
||||
By echoing `$scratchmnt` we can see the path for the [overlay image](https://wiki.archlinux.org/index.php/Overlay_filesystem), which gives you a link directly to the root file system of the container.
|
||||
|
||||
# echo $scratchmnt
|
||||
/var/lib/containers/storage/overlay/b78d0e11957d15b5d1fe776293bd40a36c28825fb6cf76f407b4d0a95b2a200d/diff
|
||||
|
||||
Notice that the overlay image is under `/var/lib/containers/storage` as one would expect. (See above on `containers/storage` or for more information see [containers/storage](https://github.com/containers/storage).)
|
||||
|
||||
Now that we have a new empty container we can install or remove software packages or simply copy content into that container. So let's install `bash` and `coreutils` so that we can run bash scripts. This could easily be `nginx` or other packages needed for your container.
|
||||
|
||||
# dnf install --installroot $scratchmnt --release 26 bash coreutils --setopt install_weak_deps=false -y
|
||||
|
||||
Let's try it out (showing the prompt in this example to demonstrate the difference):
|
||||
|
||||
# buildah run $newcontainer bash
|
||||
bash-4.4# cd /usr/bin
|
||||
bash-4.4# ls
|
||||
bash-4.4# exit
|
||||
|
||||
Notice we have a `/usr/bin` directory in the newcontainer's image layer. Let's first copy a simple file from our host into the container. Create a file called runecho.sh which contains the following:
|
||||
|
||||
#!/bin/bash
|
||||
for i in `seq 0 9`;
|
||||
do
|
||||
echo "This is a new container from ipbabble [" $i "]"
|
||||
done
|
||||
|
||||
Change the permissions on the file so that it can be run:
|
||||
|
||||
# chmod +x runecho.sh
|
||||
|
||||
With `buildah` files can be copied into the new image and we can also configure the image to run commands. Let's copy this new command into the container's `/usr/bin` directory and configure the container to run the command when the container is run:
|
||||
|
||||
# buildah copy $newcontainer ./runecho.sh /usr/bin
|
||||
# buildah config --cmd /usr/bin/runecho.sh $newcontainer
|
||||
|
||||
Now run the container:
|
||||
|
||||
# buildah run $newcontainer
|
||||
This is a new container from ipbabble [ 0 ]
|
||||
This is a new container from ipbabble [ 1 ]
|
||||
This is a new container from ipbabble [ 2 ]
|
||||
This is a new container from ipbabble [ 3 ]
|
||||
This is a new container from ipbabble [ 4 ]
|
||||
This is a new container from ipbabble [ 5 ]
|
||||
This is a new container from ipbabble [ 6 ]
|
||||
This is a new container from ipbabble [ 7 ]
|
||||
This is a new container from ipbabble [ 8 ]
|
||||
This is a new container from ipbabble [ 9 ]
|
||||
|
||||
It works! Congratulations, you have built a new OCI container from scratch that uses bash scripting. Let's add some more configuration information.
|
||||
|
||||
# buildah config --created-by "ipbabble" $newcontainer
|
||||
# buildah config --author "wgh at redhat.com @ipbabble" --label name=fedora26-bashecho $newcontainer
|
||||
|
||||
We can inspect the container's metadata using the `inspect` command:
|
||||
|
||||
# buildah inspect $newcontainer
|
||||
|
||||
We should probably unmount the container and commit it to an image:
|
||||
|
||||
# buildah unmount $newcontainer
|
||||
# buildah commit $newcontainer fedora-bashecho
|
||||
# buildah images
|
||||
|
||||
And you can see there is a new image called `fedora-bashecho:latest`. You can inspect the new image using:
|
||||
|
||||
# buildah inspect --type=image fedora-bashecho
|
||||
|
||||
Later, when you want to create a new container or containers from this image, you simply need to do `buildah from fedora-bashecho`. This will create a new container based on this image for you.
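For example (the variable name here is only an illustration):

# newcontainer2=$(buildah from fedora-bashecho)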
|
||||
|
||||
Now that you have the new image you can remove the scratch container called working-container:
|
||||
|
||||
# buildah rm $newcontainer
|
||||
|
||||
or
|
||||
|
||||
# buildah rm working-container
|
||||
|
||||
## OCI images built using Buildah are portable
|
||||
|
||||
Let's test if this new OCI image is really portable to another OCI technology like Docker. First you should install Docker and start it. Notice that Docker requires a daemon process (that's quite big) in order to run any client commands. Buildah has no daemon requirement.
|
||||
|
||||
# dnf -y install docker
|
||||
# systemctl start docker
|
||||
|
||||
Let's copy that image from where containers/storage stores it to where the Docker daemon stores its images, so that we can run it using Docker. We can achieve this using `buildah push`. This copies the image to Docker's repository area which is located under `/var/lib/docker`. Docker's repository is managed by the Docker daemon. This needs to be explicitly stated by telling Buildah to push to the Docker repository protocol using `docker-daemon:`.
|
||||
|
||||
# buildah push fedora-bashecho docker-daemon:fedora-bashecho:latest
|
||||
|
||||
Under the covers, the containers/image library calls into the containers/storage library to read the image's contents, and sends them to the local Docker daemon. This can take a little while. And usually you won't need to do this. If you're using `buildah` you are probably not using Docker. This is just for demo purposes. Let's try it:
|
||||
|
||||
# docker run fedora-bashecho
|
||||
This is a new container from ipbabble [ 0 ]
|
||||
This is a new container from ipbabble [ 1 ]
|
||||
This is a new container from ipbabble [ 2 ]
|
||||
This is a new container from ipbabble [ 3 ]
|
||||
This is a new container from ipbabble [ 4 ]
|
||||
This is a new container from ipbabble [ 5 ]
|
||||
This is a new container from ipbabble [ 6 ]
|
||||
This is a new container from ipbabble [ 7 ]
|
||||
This is a new container from ipbabble [ 8 ]
|
||||
This is a new container from ipbabble [ 9 ]
|
||||
|
||||
OCI container images built with `buildah` are completely standard as expected. So now it might be time to run:
|
||||
|
||||
# dnf -y remove docker
|
||||
|
||||
## Using Dockerfiles with Buildah
|
||||
|
||||
What if you have been using Docker for a while and have some existing Dockerfiles? Not a problem. Buildah can build images using a Dockerfile. The `build-using-dockerfile` command, or `bud` for short, takes a Dockerfile as input and produces an OCI image.
|
||||
|
||||
Find one of your Dockerfiles or create a file called Dockerfile. Use the following example or some variation if you'd like:
|
||||
|
||||
# Base on the latest Fedora
|
||||
FROM fedora:latest
|
||||
MAINTAINER ipbabble email buildahboy@redhat.com # not a real email
|
||||
|
||||
# Update image and install httpd
|
||||
RUN echo "Updating all fedora packages"; dnf -y update; dnf -y clean all
|
||||
RUN echo "Installing httpd"; dnf -y install httpd
|
||||
|
||||
# Expose the default httpd port 80
|
||||
EXPOSE 80
|
||||
|
||||
# Run the httpd
|
||||
CMD ["/usr/sbin/httpd", "-DFOREGROUND"]
|
||||
|
||||
Now run `buildah bud` with the name of the Dockerfile and the name to be given to the created image (e.g. fedora-httpd):
|
||||
|
||||
# buildah bud -f Dockerfile -t fedora-httpd
|
||||
|
||||
or, because `buildah bud` defaults to Dockerfile (note the period at the end of the example):
|
||||
|
||||
# buildah bud -t fedora-httpd .
|
||||
|
||||
You will see all the steps of the Dockerfile executing. Afterwards `buildah images` will show you the new image. Now we need to create the container using `buildah from` and test it with `buildah run`:
|
||||
|
||||
# httpcontainer=$(buildah from fedora-httpd)
|
||||
# buildah run $httpcontainer
|
||||
|
||||
While that container is running, in another shell run:
|
||||
|
||||
# curl localhost
|
||||
|
||||
You will see the standard Apache webpage.
|
||||
|
||||
Why not try modifying the Dockerfile? Instead of installing httpd, ADD the runecho.sh file and have it run as the CMD.
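One possible version of that modified Dockerfile is sketched below; it assumes runecho.sh sits in the build context next to the Dockerfile, and the image tag used afterwards is only an illustration:

# Base on the latest Fedora
FROM fedora:latest

# Add the script created earlier in this tutorial and run it by default
ADD runecho.sh /usr/bin/runecho.sh
CMD ["/usr/bin/runecho.sh"]

Then rebuild with `buildah bud -t fedora-runecho .` and test it with `buildah from` and `buildah run` as before.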
|
||||
|
||||
## Congratulations
|
||||
|
||||
Well done. You have learned a lot about Buildah using this short tutorial. Hopefully you followed along with the examples and found them to be sufficient. Be sure to look at Buildah's man pages to see the other useful commands you can use. Have fun playing.
|
||||
|
||||
If you have any suggestions or issues please post them at the [ProjectAtomic Buildah Issues page](https://github.com/projectatomic/buildah/issues).
|
||||
|
||||
For more information on Buildah and how you might contribute please visit the [Buildah home page on Github](https://github.com/projectatomic/buildah).
|
||||
27
image.go
@@ -2,6 +2,7 @@ package buildah
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -9,7 +10,6 @@ import (
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/image"
|
||||
"github.com/containers/image/manifest"
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah/docker"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -68,32 +69,16 @@ type containerImageSource struct {
|
||||
}
|
||||
|
||||
func (i *containerImageRef) NewImage(sc *types.SystemContext) (types.Image, error) {
|
||||
src, err := i.NewImageSource(sc, nil)
|
||||
src, err := i.NewImageSource(sc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return image.FromSource(src)
|
||||
}
|
||||
|
||||
func selectManifestType(preferred string, acceptable, supported []string) string {
|
||||
selected := preferred
|
||||
for _, accept := range acceptable {
|
||||
if preferred == accept {
|
||||
return preferred
|
||||
}
|
||||
for _, support := range supported {
|
||||
if accept == support {
|
||||
selected = accept
|
||||
}
|
||||
}
|
||||
}
|
||||
return selected
|
||||
}
|
||||
|
||||
func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestTypes []string) (src types.ImageSource, err error) {
|
||||
func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.ImageSource, err error) {
|
||||
// Decide which type of manifest and configuration output we're going to provide.
|
||||
supportedManifestTypes := []string{v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest}
|
||||
manifestType := selectManifestType(i.preferredManifestType, manifestTypes, supportedManifestTypes)
|
||||
manifestType := i.preferredManifestType
|
||||
// If it's not a format we support, return an error.
|
||||
if manifestType != v1.MediaTypeImageManifest && manifestType != docker.V2S2MediaTypeManifest {
|
||||
return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
|
||||
@@ -417,7 +402,7 @@ func (i *containerImageSource) Reference() types.ImageReference {
|
||||
return i.ref
|
||||
}
|
||||
|
||||
func (i *containerImageSource) GetSignatures() ([][]byte, error) {
|
||||
func (i *containerImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
@@ -22,6 +21,7 @@ import (
|
||||
"github.com/openshift/imagebuilder"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -95,6 +95,8 @@ type BuildOptions struct {
|
||||
// specified, indicating that the shared, system-wide default policy
|
||||
// should be used.
|
||||
SignaturePolicyPath string
|
||||
// SkipTLSVerify denotes whether TLS verification should not be used.
|
||||
SkipTLSVerify bool
|
||||
// ReportWriter is an io.Writer which will be used to report the
|
||||
// progress of the (possible) pulling of the source image and the
|
||||
// writing of the new image.
|
||||
@@ -103,6 +105,7 @@ type BuildOptions struct {
|
||||
// configuration data.
|
||||
// Accepted values are OCIv1ImageFormat and Dockerv2ImageFormat.
|
||||
OutputFormat string
|
||||
AuthFilePath string
|
||||
}
|
||||
|
||||
// Executor is a buildah-based implementation of the imagebuilder.Executor
|
||||
@@ -136,11 +139,15 @@ type Executor struct {
|
||||
reportWriter io.Writer
|
||||
}
|
||||
|
||||
func makeSystemContext(signaturePolicyPath string) *types.SystemContext {
|
||||
func makeSystemContext(signaturePolicyPath, authFilePath string, skipTLSVerify bool) *types.SystemContext {
|
||||
sc := &types.SystemContext{}
|
||||
if signaturePolicyPath != "" {
|
||||
sc.SignaturePolicyPath = signaturePolicyPath
|
||||
}
|
||||
if authFilePath != "" {
|
||||
sc.AuthFilePath = authFilePath
|
||||
}
|
||||
sc.DockerInsecureSkipTLSVerify = skipTLSVerify
|
||||
return sc
|
||||
}
|
||||
|
||||
@@ -420,7 +427,7 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
|
||||
outputFormat: options.OutputFormat,
|
||||
additionalTags: options.AdditionalTags,
|
||||
signaturePolicyPath: options.SignaturePolicyPath,
|
||||
systemContext: makeSystemContext(options.SignaturePolicyPath),
|
||||
systemContext: makeSystemContext(options.SignaturePolicyPath, options.AuthFilePath, options.SkipTLSVerify),
|
||||
volumeCache: make(map[string]string),
|
||||
volumeCacheInfo: make(map[string]os.FileInfo),
|
||||
log: options.Log,
|
||||
@@ -514,7 +521,7 @@ func (b *Executor) Prepare(ib *imagebuilder.Builder, node *parser.Node, from str
|
||||
}
|
||||
return errors.Wrapf(err, "error updating build context")
|
||||
}
|
||||
mountPoint, err := builder.Mount("")
|
||||
mountPoint, err := builder.Mount(builder.MountLabel)
|
||||
if err != nil {
|
||||
if err2 := builder.Delete(); err2 != nil {
|
||||
logrus.Debugf("error deleting container which we failed to mount: %v", err2)
|
||||
|
||||
@@ -9,10 +9,10 @@ import (
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/storage/pkg/chrootarchive"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func cloneToDirectory(url, dir string) error {
|
||||
|
||||
BIN
logos/buildah-logo-_both.png
Normal file
|
After Width: | Height: | Size: 31 KiB |
BIN
logos/buildah-logo-reverse.png
Normal file
|
After Width: | Height: | Size: 31 KiB |
BIN
logos/buildah-logo-reverse_md.png
Normal file
|
After Width: | Height: | Size: 13 KiB |
BIN
logos/buildah-logo-reverse_sm.png
Normal file
|
After Width: | Height: | Size: 7.8 KiB |
BIN
logos/buildah-logo.png
Normal file
|
After Width: | Height: | Size: 27 KiB |
BIN
logos/buildah-logo_med.png
Normal file
|
After Width: | Height: | Size: 12 KiB |
BIN
logos/buildah-logo_sm.png
Normal file
|
After Width: | Height: | Size: 7.1 KiB |
BIN
logos/buildah-no-text.png
Normal file
|
After Width: | Height: | Size: 29 KiB |
1870
logos/buildah.svg
Normal file
|
After Width: | Height: | Size: 88 KiB |
70
new.go
@@ -2,16 +2,19 @@ package buildah
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/openshift/imagebuilder"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -25,8 +28,37 @@ const (
|
||||
DefaultTransport = "docker://"
|
||||
)
|
||||
|
||||
func reserveSELinuxLabels(store storage.Store, id string) error {
|
||||
if selinux.GetEnabled() {
|
||||
containers, err := store.Containers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, c := range containers {
|
||||
if id == c.ID {
|
||||
continue
|
||||
} else {
|
||||
b, err := OpenBuilder(store, c.ID)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// Ignore "not exist" errors, since the container was probably created by another tool
|
||||
// TODO, we need to read other containers json data to reserve their SELinux labels
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
// Prevent containers from using same MCS Label
|
||||
if err := label.ReserveLabel(b.ProcessLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newBuilder(store storage.Store, options BuilderOptions) (*Builder, error) {
|
||||
var err error
|
||||
var ref types.ImageReference
|
||||
var img *storage.Image
|
||||
manifest := []byte{}
|
||||
@@ -45,6 +77,7 @@ func newBuilder(store storage.Store, options BuilderOptions) (*Builder, error) {
|
||||
|
||||
imageID := ""
|
||||
if image != "" {
|
||||
var err error
|
||||
if options.PullPolicy == PullAlways {
|
||||
pulledReference, err2 := pullImage(store, options, systemContext)
|
||||
if err2 != nil {
|
||||
@@ -158,21 +191,32 @@ func newBuilder(store storage.Store, options BuilderOptions) (*Builder, error) {
|
||||
}
|
||||
}()
|
||||
|
||||
if err := reserveSELinuxLabels(store, container.ID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
processLabel, mountLabel, err := label.InitLabels(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
builder := &Builder{
|
||||
store: store,
|
||||
Type: containerType,
|
||||
FromImage: image,
|
||||
FromImageID: imageID,
|
||||
Config: config,
|
||||
Manifest: manifest,
|
||||
Container: name,
|
||||
ContainerID: container.ID,
|
||||
ImageAnnotations: map[string]string{},
|
||||
ImageCreatedBy: "",
|
||||
store: store,
|
||||
Type: containerType,
|
||||
FromImage: image,
|
||||
FromImageID: imageID,
|
||||
Config: config,
|
||||
Manifest: manifest,
|
||||
Container: name,
|
||||
ContainerID: container.ID,
|
||||
ImageAnnotations: map[string]string{},
|
||||
ImageCreatedBy: "",
|
||||
ProcessLabel: processLabel,
|
||||
MountLabel: mountLabel,
|
||||
DefaultMountsFilePath: options.DefaultMountsFilePath,
|
||||
}
|
||||
|
||||
if options.Mount {
|
||||
_, err = builder.Mount("")
|
||||
_, err = builder.Mount(mountLabel)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error mounting build container")
|
||||
}
|
||||
|
||||
4
pull.go
@@ -3,7 +3,6 @@ package buildah
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
cp "github.com/containers/image/copy"
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/signature"
|
||||
@@ -13,6 +12,7 @@ import (
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func localImageNameForReference(store storage.Store, srcRef types.ImageReference) (string, error) {
|
||||
@@ -109,6 +109,6 @@ func pullImage(store storage.Store, options BuilderOptions, sc *types.SystemCont
|
||||
|
||||
logrus.Debugf("copying %q to %q", spec, name)
|
||||
|
||||
err = cp.Image(policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter, options.SystemContext, nil))
|
||||
err = cp.Image(policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter, options.SystemContext, nil, ""))
|
||||
return destRef, err
|
||||
}
|
||||
|
||||
59
run.go
@@ -8,13 +8,13 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/ioutils"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/runtime-tools/generate"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -100,6 +100,26 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts
|
||||
Options: []string{"rbind", "ro"},
|
||||
})
|
||||
}
|
||||
|
||||
cdir, err := b.store.ContainerDirectory(b.ContainerID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID)
|
||||
}
|
||||
|
||||
// Add secrets mounts
|
||||
mountsFiles := []string{OverrideMountsFile, b.DefaultMountsFilePath}
|
||||
for _, file := range mountsFiles {
|
||||
secretMounts, err := secretMounts(file, b.MountLabel, cdir)
|
||||
if err != nil {
|
||||
logrus.Warn("error mounting secrets, skipping...")
|
||||
}
|
||||
for _, mount := range secretMounts {
|
||||
if haveMount(mount.Destination) {
|
||||
continue
|
||||
}
|
||||
mounts = append(mounts, mount)
|
||||
}
|
||||
}
|
||||
// Add temporary copies of the contents of volume locations at the
|
||||
// volume locations, unless we already have something there.
|
||||
for _, volume := range volumes {
|
||||
@@ -107,20 +127,15 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts
|
||||
// Already mounting something there, no need to bother.
|
||||
continue
|
||||
}
|
||||
cdir, err := b.store.ContainerDirectory(b.ContainerID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID)
|
||||
}
|
||||
subdir := digest.Canonical.FromString(volume).Hex()
|
||||
volumePath := filepath.Join(cdir, "buildah-volumes", subdir)
|
||||
logrus.Debugf("using %q for volume at %q", volumePath, volume)
|
||||
// If we need to, initialize the volume path's initial contents.
|
||||
if _, err = os.Stat(volumePath); os.IsNotExist(err) {
|
||||
if err = os.MkdirAll(volumePath, 0755); err != nil {
|
||||
return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, volume, b.ContainerID)
|
||||
}
|
||||
srcPath := filepath.Join(mountPoint, volume)
|
||||
if err = archive.CopyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
|
||||
if err = copyFileWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
|
||||
return errors.Wrapf(err, "error populating directory %q for volume %q in container %q using contents of %q", volumePath, volume, b.ContainerID, srcPath)
|
||||
}
|
||||
|
||||
@@ -182,7 +197,9 @@ func (b *Builder) Run(command []string, options RunOptions) error {
|
||||
} else if b.Hostname() != "" {
|
||||
g.SetHostname(b.Hostname())
|
||||
}
|
||||
mountPoint, err := b.Mount("")
|
||||
g.SetProcessSelinuxLabel(b.ProcessLabel)
|
||||
g.SetLinuxMountLabel(b.MountLabel)
|
||||
mountPoint, err := b.Mount(b.MountLabel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -191,10 +208,32 @@ func (b *Builder) Run(command []string, options RunOptions) error {
|
||||
logrus.Errorf("error unmounting container: %v", err2)
|
||||
}
|
||||
}()
|
||||
for _, mp := range []string{
|
||||
"/proc/kcore",
|
||||
"/proc/latency_stats",
|
||||
"/proc/timer_list",
|
||||
"/proc/timer_stats",
|
||||
"/proc/sched_debug",
|
||||
"/proc/scsi",
|
||||
"/sys/firmware",
|
||||
} {
|
||||
g.AddLinuxMaskedPaths(mp)
|
||||
}
|
||||
|
||||
for _, rp := range []string{
|
||||
"/proc/asound",
|
||||
"/proc/bus",
|
||||
"/proc/fs",
|
||||
"/proc/irq",
|
||||
"/proc/sys",
|
||||
"/proc/sysrq-trigger",
|
||||
} {
|
||||
g.AddLinuxReadonlyPaths(rp)
|
||||
}
|
||||
g.SetRootPath(mountPoint)
|
||||
switch options.Terminal {
|
||||
case DefaultTerminal:
|
||||
g.SetProcessTerminal(logrus.IsTerminal(os.Stdout))
|
||||
g.SetProcessTerminal(terminal.IsTerminal(int(os.Stdout.Fd())))
|
||||
case WithTerminal:
|
||||
g.SetProcessTerminal(true)
|
||||
case WithoutTerminal:
|
||||
|
||||
198
secrets.go
Normal file
@@ -0,0 +1,198 @@
|
||||
package buildah
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultMountsFile holds the default mount paths in the form
|
||||
// "host_path:container_path"
|
||||
DefaultMountsFile = "/usr/share/containers/mounts.conf"
|
||||
// OverrideMountsFile holds the default mount paths in the form
|
||||
// "host_path:container_path" overridden by the user
|
||||
OverrideMountsFile = "/etc/containers/mounts.conf"
|
||||
)
|
||||
|
||||
// SecretData info
|
||||
type SecretData struct {
|
||||
Name string
|
||||
Data []byte
|
||||
}
|
||||
|
||||
func getMounts(filePath string) []string {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
logrus.Warnf("file %q not found, skipping...", filePath)
|
||||
return nil
|
||||
}
|
||||
defer file.Close()
|
||||
scanner := bufio.NewScanner(file)
|
||||
if err = scanner.Err(); err != nil {
|
||||
logrus.Warnf("error reading file %q, skipping...", filePath)
|
||||
return nil
|
||||
}
|
||||
var mounts []string
|
||||
for scanner.Scan() {
|
||||
mounts = append(mounts, scanner.Text())
|
||||
}
|
||||
return mounts
|
||||
}
|
||||
|
||||
// SaveTo saves secret data to given directory
|
||||
func (s SecretData) SaveTo(dir string) error {
|
||||
path := filepath.Join(dir, s.Name)
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
return ioutil.WriteFile(path, s.Data, 0700)
|
||||
}
|
||||
|
||||
func readAll(root, prefix string) ([]SecretData, error) {
|
||||
path := filepath.Join(root, prefix)
|
||||
|
||||
data := []SecretData{}
|
||||
|
||||
files, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
fileData, err := readFile(root, filepath.Join(prefix, f.Name()))
|
||||
if err != nil {
|
||||
// If the file does not exist, it might be a dangling symlink
|
||||
// Ignore the error
|
||||
if os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
data = append(data, fileData...)
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func readFile(root, name string) ([]SecretData, error) {
|
||||
path := filepath.Join(root, name)
|
||||
|
||||
s, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if s.IsDir() {
|
||||
dirData, err2 := readAll(root, name)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
return dirData, nil
|
||||
}
|
||||
bytes, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []SecretData{{Name: name, Data: bytes}}, nil
|
||||
}
|
||||
|
||||
// getHostAndCtrDir separates the host:container paths
|
||||
func getMountsMap(path string) (string, string, error) {
|
||||
arr := strings.SplitN(path, ":", 2)
|
||||
if len(arr) == 2 {
|
||||
return arr[0], arr[1], nil
|
||||
}
|
||||
return "", "", errors.Errorf("unable to get host and container dir")
|
||||
}
|
||||
|
||||
func getHostSecretData(hostDir string) ([]SecretData, error) {
|
||||
var allSecrets []SecretData
|
||||
hostSecrets, err := readAll(hostDir, "")
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to read secrets from %q", hostDir)
|
||||
}
|
||||
return append(allSecrets, hostSecrets...), nil
|
||||
}
|
||||
|
||||
// secretMounts copies the contents of the host directory to the container directory
|
||||
// and returns a list of mounts
|
||||
func secretMounts(filePath, mountLabel, containerWorkingDir string) ([]rspec.Mount, error) {
|
||||
var mounts []rspec.Mount
|
||||
defaultMountsPaths := getMounts(filePath)
|
||||
for _, path := range defaultMountsPaths {
|
||||
hostDir, ctrDir, err := getMountsMap(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// skip if the hostDir path doesn't exist
|
||||
if _, err = os.Stat(hostDir); os.IsNotExist(err) {
|
||||
logrus.Warnf("%q doesn't exist, skipping", hostDir)
|
||||
continue
|
||||
}
|
||||
|
||||
ctrDirOnHost := filepath.Join(containerWorkingDir, ctrDir)
|
||||
if err = os.RemoveAll(ctrDirOnHost); err != nil {
|
||||
return nil, fmt.Errorf("remove container directory failed: %v", err)
|
||||
}
|
||||
|
||||
if err = os.MkdirAll(ctrDirOnHost, 0755); err != nil {
|
||||
return nil, fmt.Errorf("making container directory failed: %v", err)
|
||||
}
|
||||
|
||||
hostDir, err = resolveSymbolicLink(hostDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, err := getHostSecretData(hostDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "getting host secret data failed")
|
||||
}
|
||||
for _, s := range data {
|
||||
err = s.SaveTo(ctrDirOnHost)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
err = label.Relabel(ctrDirOnHost, mountLabel, false)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error applying correct labels")
|
||||
}
|
||||
|
||||
m := rspec.Mount{
|
||||
Source: ctrDirOnHost,
|
||||
Destination: ctrDir,
|
||||
Type: "bind",
|
||||
Options: []string{"bind"},
|
||||
}
|
||||
|
||||
mounts = append(mounts, m)
|
||||
}
|
||||
return mounts, nil
|
||||
}
|
||||
|
||||
// resolveSymbolicLink resolves a possible symlink path. If the path is a symlink, returns resolved
|
||||
// path; if not, returns the original path.
|
||||
func resolveSymbolicLink(path string) (string, error) {
|
||||
info, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if info.Mode()&os.ModeSymlink != os.ModeSymlink {
|
||||
return path, nil
|
||||
}
|
||||
return filepath.EvalSymlinks(path)
|
||||
}
|
||||
@@ -110,5 +110,6 @@ load helpers
|
||||
buildah rmi $id
|
||||
done
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" == "" ]
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -24,6 +25,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
|
||||
target=alpine-image
|
||||
@@ -37,6 +39,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -52,6 +55,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
|
||||
target=alpine-image
|
||||
@@ -65,6 +69,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -88,6 +93,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -100,6 +106,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -112,6 +119,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -124,6 +132,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -136,6 +145,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -155,6 +165,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -168,6 +179,7 @@ load helpers
|
||||
buildah --debug=false images -q
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -177,6 +189,7 @@ load helpers
|
||||
target3=so-many-scratch-images
|
||||
buildah bud --signature-policy ${TESTSDIR}/policy.json -t ${target} -t ${target2} -t ${target3} ${TESTSDIR}/bud/from-scratch
|
||||
run buildah --debug=false images
|
||||
[ "$status" -eq 0 ]
|
||||
cid=$(buildah from ${target})
|
||||
buildah rm ${cid}
|
||||
cid=$(buildah from library/${target2})
|
||||
@@ -185,6 +198,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi -f $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -200,10 +214,12 @@ load helpers
|
||||
run test -s $root/vol/subvol/subvolfile
|
||||
[ "$status" -ne 0 ]
|
||||
run stat -c %f $root/vol/subvol
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = 41ed ]
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -217,5 +233,6 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -7,13 +7,13 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/projectatomic/buildah/docker"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
@@ -18,3 +18,23 @@ load helpers
|
||||
buildah rm "$cid"
|
||||
done
|
||||
}
|
||||
|
||||
@test "push with manifest type conversion" {
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json alpine)
|
||||
run buildah push --signature-policy ${TESTSDIR}/policy.json --format oci alpine dir:my-dir
|
||||
echo "$output"
|
||||
[ "$status" -eq 0 ]
|
||||
manifest=$(cat my-dir/manifest.json)
|
||||
run grep "application/vnd.oci.image.config.v1+json" <<< "$manifest"
|
||||
echo "$output"
|
||||
[ "$status" -eq 0 ]
|
||||
run buildah push --signature-policy ${TESTSDIR}/policy.json --format v2s2 alpine dir:my-dir
|
||||
echo "$output"
|
||||
[ "$status" -eq 0 ]
|
||||
run grep "application/vnd.docker.distribution.manifest.v2+json" my-dir/manifest.json
|
||||
echo "$output"
|
||||
[ "$status" -eq 0 ]
|
||||
buildah rm "$cid"
|
||||
buildah rmi alpine
|
||||
rm -rf my-dir
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ load helpers
|
||||
fi
|
||||
|
||||
# Build a container to use for building the binaries.
|
||||
image=fedora:26
|
||||
image=registry.fedoraproject.org/fedora:26
|
||||
cid=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json $image)
|
||||
root=$(buildah --debug=false mount $cid)
|
||||
commit=$(git log --format=%H -n 1)
|
||||
@@ -27,7 +27,7 @@ load helpers
|
||||
buildah --debug=false run $cid -- rpmbuild --define "_topdir /rpmbuild" -ba /rpmbuild/SPECS/buildah.spec
|
||||
|
||||
# Build a second new container.
|
||||
cid2=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json fedora:26)
|
||||
cid2=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json registry.fedoraproject.org/fedora:26)
|
||||
root2=$(buildah --debug=false mount $cid2)
|
||||
|
||||
# Copy the binary packages from the first container to the second one, and build a list of
|
||||
|
||||
@@ -74,26 +74,32 @@ load helpers
|
||||
buildah config $cid --entrypoint ""
|
||||
buildah config $cid --cmd pwd
|
||||
run buildah --debug=false run $cid
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = /tmp ]
|
||||
|
||||
buildah config $cid --entrypoint echo
|
||||
run buildah --debug=false run $cid
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = pwd ]
|
||||
|
||||
buildah config $cid --cmd ""
|
||||
run buildah --debug=false run $cid
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
|
||||
buildah config $cid --entrypoint ""
|
||||
run buildah --debug=false run $cid echo that-other-thing
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = that-other-thing ]
|
||||
|
||||
buildah config $cid --cmd echo
|
||||
run buildah --debug=false run $cid echo that-other-thing
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = that-other-thing ]
|
||||
|
||||
buildah config $cid --entrypoint echo
|
||||
run buildah --debug=false run $cid echo that-other-thing
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = that-other-thing ]
|
||||
|
||||
buildah rm $cid
|
||||
@@ -112,8 +118,10 @@ load helpers
|
||||
root=$(buildah mount $cid)
|
||||
|
||||
testuser=jimbo
|
||||
testbogususer=nosuchuser
|
||||
testgroup=jimbogroup
|
||||
testuid=$RANDOM
|
||||
testotheruid=$RANDOM
|
||||
testgid=$RANDOM
|
||||
testgroupid=$RANDOM
|
||||
echo "$testuser:x:$testuid:$testgid:Jimbo Jenkins:/home/$testuser:/bin/sh" >> $root/etc/passwd
|
||||
@@ -122,52 +130,116 @@ load helpers
|
||||
buildah config $cid -u ""
|
||||
buildah run -- $cid id
|
||||
run buildah --debug=false run -- $cid id -u
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = 0 ]
|
||||
run buildah --debug=false run -- $cid id -g
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = 0 ]
|
||||
|
||||
buildah config $cid -u ${testuser}
|
||||
buildah run -- $cid id
|
||||
run buildah --debug=false run -- $cid id -u
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testuid ]
|
||||
run buildah --debug=false run -- $cid id -g
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testgid ]
|
||||
|
||||
buildah config $cid -u ${testuid}
|
||||
buildah run -- $cid id
|
||||
run buildah --debug=false run -- $cid id -u
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testuid ]
|
||||
run buildah --debug=false run -- $cid id -g
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testgid ]
|
||||
|
||||
buildah config $cid -u ${testuser}:${testgroup}
|
||||
buildah run -- $cid id
|
||||
run buildah --debug=false run -- $cid id -u
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testuid ]
|
||||
run buildah --debug=false run -- $cid id -g
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testgroupid ]
|
||||
|
||||
buildah config $cid -u ${testuid}:${testgroup}
|
||||
buildah run -- $cid id
|
||||
run buildah --debug=false run -- $cid id -u
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testuid ]
|
||||
run buildah --debug=false run -- $cid id -g
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testgroupid ]
|
||||
|
||||
buildah config $cid -u ${testotheruid}:${testgroup}
|
||||
buildah run -- $cid id
|
||||
run buildah --debug=false run -- $cid id -u
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testotheruid ]
|
||||
run buildah --debug=false run -- $cid id -g
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testgroupid ]
|
||||
|
||||
buildah config $cid -u ${testotheruid}
|
||||
buildah run -- $cid id
|
||||
run buildah --debug=false run -- $cid id -u
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testotheruid ]
|
||||
run buildah --debug=false run -- $cid id -g
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = 0 ]
|
||||
|
||||
buildah config $cid -u ${testuser}:${testgroupid}
|
||||
buildah run -- $cid id
|
||||
run buildah --debug=false run -- $cid id -u
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testuid ]
|
||||
run buildah --debug=false run -- $cid id -g
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testgroupid ]
|
||||
|
||||
buildah config $cid -u ${testuid}:${testgroupid}
|
||||
buildah run -- $cid id
|
||||
run buildah --debug=false run -- $cid id -u
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testuid ]
|
||||
run buildah --debug=false run -- $cid id -g
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = $testgroupid ]
|
||||
|
||||
buildah config $cid -u ${testbogususer}
|
||||
run buildah --debug=false run -- $cid id -u
|
||||
[ "$status" -ne 0 ]
|
||||
[[ "$output" =~ "unknown user" ]]
|
||||
run buildah --debug=false run -- $cid id -g
|
||||
[ "$status" -ne 0 ]
|
||||
[[ "$output" =~ "unknown user" ]]
|
||||
|
||||
ln -vsf /etc/passwd $root/etc/passwd
|
||||
buildah config $cid -u ${testuser}:${testgroup}
|
||||
run buildah --debug=false run -- $cid id -u
|
||||
echo "$output"
|
||||
[ "$status" -ne 0 ]
|
||||
[[ "$output" =~ "unknown user" ]]
|
||||
|
||||
buildah unmount $cid
|
||||
buildah rm $cid
|
||||
}
|
||||
|
||||
@test "run --hostname" {
|
||||
if ! which runc ; then
|
||||
skip
|
||||
fi
|
||||
runc --version
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json alpine)
|
||||
run buildah --debug=false run $cid hostname
|
||||
echo "$output"
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" != "foobar" ]
|
||||
run buildah --debug=false run --hostname foobar $cid hostname
|
||||
echo "$output"
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "foobar" ]
|
||||
buildah rm $cid
|
||||
}
|
||||
|
||||
33
tests/secrets.bats
Normal file
@@ -0,0 +1,33 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
function setup() {
|
||||
mkdir $TESTSDIR/containers
|
||||
touch $TESTSDIR/mounts.conf
|
||||
MOUNTS_PATH=$TESTSDIR/containers/mounts.conf
|
||||
echo "$TESTSDIR/rhel/secrets:/run/secrets" > $MOUNTS_PATH
|
||||
|
||||
mkdir $TESTSDIR/rhel
|
||||
mkdir $TESTSDIR/rhel/secrets
|
||||
touch $TESTSDIR/rhel/secrets/test.txt
|
||||
echo "Testing secrets mounts. I am mounted!" > $TESTSDIR/rhel/secrets/test.txt
|
||||
}
|
||||
|
||||
@test "bind secrets mounts to container" {
|
||||
if ! which runc ; then
|
||||
skip
|
||||
fi
|
||||
runc --version
|
||||
cid=$(buildah --default-mounts-file "$MOUNTS_PATH" --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json alpine)
|
||||
run buildah --debug=false run $cid ls /run
|
||||
echo "$output"
|
||||
[ "$status" -eq 0 ]
|
||||
mounts="$output"
|
||||
run grep "secrets" <<< "$mounts"
|
||||
echo "$output"
|
||||
[ "$status" -eq 0 ]
|
||||
buildah rm $cid
|
||||
rm -rf $TESTSDIR/containers
|
||||
rm -rf $TESTSDIR/rhel
|
||||
}
|
||||
26
tests/selinux.bats
Normal file
@@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
@test "selinux test" {
|
||||
if ! which selinuxenabled ; then
|
||||
skip "No selinuxenabled"
|
||||
elif ! /usr/sbin/selinuxenabled; then
|
||||
skip "selinux is disabled"
|
||||
fi
|
||||
image=alpine
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json $image)
|
||||
firstlabel=$(buildah --debug=false run $cid cat /proc/1/attr/current)
|
||||
run buildah --debug=false run $cid cat /proc/1/attr/current
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" == $firstlabel ]
|
||||
|
||||
cid1=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json $image)
|
||||
run buildah --debug=false run $cid1 cat /proc/1/attr/current
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" != $firstlabel ]
|
||||
|
||||
buildah rm $cid
|
||||
buildah rm $cid1
|
||||
}
|
||||
|
||||
@@ -1,10 +1,18 @@
|
||||
#!/bin/bash
|
||||
# test_buildah_authentication
|
||||
# A script to be run at the command line with Buildah installed.
|
||||
# This currently needs to be run as root and Docker must be
|
||||
# installed on the system.
|
||||
# This will test the code and should be run with this command:
|
||||
#
|
||||
# /bin/bash -v test_buildah_authentication.sh
|
||||
|
||||
########
|
||||
# System setup - dir for creds and start docker
|
||||
########
|
||||
mkdir -p /root/auth
|
||||
systemctl restart docker
|
||||
|
||||
########
|
||||
# Create creds and store in /root/auth/htpasswd
|
||||
########
|
||||
|
||||
193
tests/test_buildah_baseline.sh
Normal file
@@ -0,0 +1,193 @@
|
||||
#!/bin/bash
|
||||
# test_buildah_baseline.sh
|
||||
# A script to be run at the command line with Buildah installed.
|
||||
# This should be run against a new kit to provide base level testing
|
||||
# on a freshly installed machine with no images or containers in
|
||||
# play. This currently needs to be run as root.
|
||||
#
|
||||
# Commands based on the tutorial provided by William Henry.
|
||||
#
|
||||
# To run this command:
|
||||
#
|
||||
# /bin/bash -v test_buildah_baseline.sh
|
||||
|
||||
########
|
||||
# Next two commands should return blanks
|
||||
########
|
||||
buildah images
|
||||
buildah containers
|
||||
|
||||
########
|
||||
# Create Fedora based container
|
||||
########
|
||||
container=$(buildah from fedora)
|
||||
echo $container
|
||||
|
||||
########
|
||||
# Run container and display contents in /etc
|
||||
########
|
||||
buildah run $container -- ls -alF /etc
|
||||
|
||||
########
|
||||
# Run Java in the container - should FAIL
|
||||
########
|
||||
buildah run $container java
|
||||
|
||||
########
|
||||
# Install java onto the container
|
||||
########
|
||||
buildah run $container -- dnf -y install java
|
||||
|
||||
########
|
||||
# Run Java in the container - should show java usage
|
||||
########
|
||||
buildah run $container java
|
||||
|
||||
########
|
||||
# Create a scratch container
|
||||
########
|
||||
newcontainer=$(buildah from scratch)
|
||||
|
||||
########
|
||||
# Check and find two containers
|
||||
########
|
||||
buildah containers
|
||||
|
||||
########
|
||||
# Check images, no "scratch" image
|
||||
########
|
||||
buildah images
|
||||
|
||||
########
|
||||
# Run the container - should FAIL
|
||||
########
|
||||
buildah run $newcontainer bash
|
||||
|
||||
########
|
||||
# Mount the container's root file system
|
||||
########
|
||||
scratchmnt=$(buildah mount $newcontainer)
|
||||
|
||||
########
|
||||
# Show the location, should be /var/lib/containers/storage/overlay/{id}/diff
|
||||
########
|
||||
echo $scratchmnt
|
||||
|
||||
########
|
||||
# Install Fedora 26 bash and coreutils
|
||||
########
|
||||
dnf install --installroot $scratchmnt --release 26 bash coreutils --setopt install_weak_deps=false -y
|
||||
|
||||
########
|
||||
# Check /usr/bin on the new container
|
||||
########
|
||||
buildah run $newcontainer -- ls -alF /usr/bin
|
||||
|
||||
########
|
||||
# Create shell script to test on
|
||||
########
|
||||
FILE=./runecho.sh
|
||||
/bin/cat <<EOM >$FILE
|
||||
#!/bin/bash
|
||||
for i in {1..9};
|
||||
do
|
||||
echo "This is a new container from ipbabble [" $i "]"
|
||||
done
|
||||
EOM
|
||||
chmod +x $FILE
|
||||
|
||||
########
|
||||
# Copy and run file on scratch container
|
||||
########
|
||||
buildah copy $newcontainer $FILE /usr/bin
|
||||
buildah config --cmd /usr/bin/runecho.sh $newcontainer
|
||||
buildah run $newcontainer
|
||||
|
||||
########
|
||||
# Add configuration information
|
||||
########
|
||||
buildah config --created-by "ipbabble" $newcontainer
|
||||
buildah config --author "wgh at redhat.com @ipbabble" --label name=fedora26-bashecho $newcontainer
|
||||
|
||||
########
|
||||
# Inspect the container, verifying above was put into it
|
||||
########
|
||||
buildah inspect $newcontainer
|
||||
|
||||
########
|
||||
# Unmount the container
|
||||
########
|
||||
buildah unmount $newcontainer
|
||||
|
||||
########
|
||||
# Commit the image
|
||||
########
|
||||
buildah commit $newcontainer fedora-bashecho
|
||||
|
||||
########
|
||||
# Check the images; there should be a fedora-bashecho:latest image
|
||||
########
|
||||
buildah images
|
||||
|
||||
########
|
||||
# Inspect the fedora-bashecho image
|
||||
########
|
||||
buildah inspect --type=image fedora-bashecho
|
||||
|
||||
########
|
||||
# Remove the container
|
||||
########
|
||||
buildah rm $newcontainer
|
||||
|
||||
########
|
||||
# Install Docker, but not for long!
|
||||
########
|
||||
dnf -y install docker
|
||||
systemctl start docker
|
||||
|
||||
########
|
||||
# Push fedora-basecho to the Docker daemon
|
||||
########
|
||||
buildah push fedora-bashecho docker-daemon:fedora-bashecho:latest
|
||||
|
||||
########
|
||||
# Run fedora-bashecho from Docker
|
||||
########
|
||||
docker run fedoara-baseecho
|
||||
|
||||
########
# Time to remove Docker
########
dnf -y remove docker

########
# Build Dockerfile
########
FILE=./Dockerfile
/bin/cat <<EOM >$FILE
FROM docker/whalesay:latest
RUN apt-get -y update && apt-get install -y fortunes
CMD /usr/games/fortune -a | cowsay
EOM
chmod +x $FILE

########
# Build with the Dockerfile
########
buildah bud -f Dockerfile -t whale-says

########
# Create a whalesays container
########
whalesays=$(buildah from whale-says)

########
# Run the container to see what the whale says
########
buildah run $whalesays

########
# Clean up Buildah
########
buildah rm $(buildah containers -q)
buildah rmi -f $(buildah --debug=false images -q)

@@ -3,6 +3,10 @@ set -e

cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"

# Default to using /var/tmp for test space, since it's more likely to support
# labels than /tmp, which is often on tmpfs.
export TMPDIR=${TMPDIR:-/var/tmp}

# Load the helpers.
. helpers.bash

@@ -17,5 +17,5 @@ exec gometalinter.v1 \
  --disable=gas \
  --disable=aligncheck \
  --cyclo-over=40 \
  --deadline=240s \
  --deadline=480s \
  --tests "$@"

@@ -10,6 +10,7 @@ load helpers

@test "buildah version up to date in .spec file" {
  run buildah version
  [ "$status" -eq 0 ]
  bversion=$(echo "$output" | awk '/^Version:/ { print $NF }')
  rversion=$(cat ${TESTSDIR}/../contrib/rpm/buildah.spec | awk '/^Version:/ { print $NF }')
  test "$bversion" = "$rversion"

user.go
@@ -26,39 +26,34 @@ func getUser(rootdir, userspec string) (specs.User, error) {
    uid64, uerr := strconv.ParseUint(userspec, 10, 32)
    if uerr == nil && groupspec == "" {
        // We parsed the user name as a number, and there's no group
        // component, so we need to look up the user's primary GID.
        // component, so try to look up the primary GID of the user who
        // has this UID.
        var name string
        name, gid64, gerr = lookupGroupForUIDInContainer(rootdir, uid64)
        if gerr == nil {
            userspec = name
        } else {
            if userrec, err := user.LookupId(userspec); err == nil {
                gid64, gerr = strconv.ParseUint(userrec.Gid, 10, 32)
                userspec = userrec.Name
            }
            // Leave userspec alone, but swallow the error and just
            // use GID 0.
            gid64 = 0
            gerr = nil
        }
    }
    if uerr != nil {
        // The user ID couldn't be parsed as a number, so try to look
        // up the user's UID and primary GID.
        uid64, gid64, uerr = lookupUserInContainer(rootdir, userspec)
        gerr = uerr
    }
    if uerr != nil {
        if userrec, err := user.Lookup(userspec); err == nil {
            uid64, uerr = strconv.ParseUint(userrec.Uid, 10, 32)
            gid64, gerr = strconv.ParseUint(userrec.Gid, 10, 32)
        }
    }

    if groupspec != "" {
        // We have a group name or number, so parse it.
        gid64, gerr = strconv.ParseUint(groupspec, 10, 32)
        if gerr != nil {
            // The group couldn't be parsed as a number, so look up
            // the group's GID.
            gid64, gerr = lookupGroupInContainer(rootdir, groupspec)
        }
        if gerr != nil {
            if group, err := user.LookupGroup(groupspec); err == nil {
                gid64, gerr = strconv.ParseUint(group.Gid, 10, 32)
            }
        }
    }

    if uerr == nil && gerr == nil {

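The hunk above changes how getUser falls back when a numeric user spec has no group component: instead of a host-side user.LookupId, it now defaults to GID 0 after the in-container lookup fails. A minimal sketch of the overall user[:group] resolution order is below, assuming plain os/user lookups on the host rather than buildah's in-container passwd/group parsing; resolveSpec, its name, and its simplified error handling are illustrative only, not buildah code.

```go
// resolve_spec_sketch.go - hedged illustration of "user[:group]" resolution.
package main

import (
	"fmt"
	"os/user"
	"strconv"
	"strings"
)

// resolveSpec resolves a "user[:group]" spec to numeric IDs: numeric values
// are used as-is, names fall back to user/group database lookups, and a
// numeric user with no group keeps GID 0 if nothing better is found.
func resolveSpec(spec string) (uint64, uint64, error) {
	userspec, groupspec := spec, ""
	if i := strings.IndexByte(spec, ':'); i != -1 {
		userspec, groupspec = spec[:i], spec[i+1:]
	}

	uid, err := strconv.ParseUint(userspec, 10, 32)
	var gid uint64
	if err != nil {
		// Not numeric: look the user up by name and take the primary GID.
		u, lerr := user.Lookup(userspec)
		if lerr != nil {
			return 0, 0, lerr
		}
		uid, _ = strconv.ParseUint(u.Uid, 10, 32)
		gid, _ = strconv.ParseUint(u.Gid, 10, 32)
	}

	if groupspec != "" {
		// Explicit group: a numeric value wins, otherwise look it up by name.
		g, gerr := strconv.ParseUint(groupspec, 10, 32)
		if gerr != nil {
			grp, lerr := user.LookupGroup(groupspec)
			if lerr != nil {
				return 0, 0, lerr
			}
			g, _ = strconv.ParseUint(grp.Gid, 10, 32)
		}
		gid = g
	}
	return uid, gid, nil
}

func main() {
	uid, gid, err := resolveSpec("root:0")
	fmt.Println(uid, gid, err)
}
```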
@@ -1,4 +1,4 @@
// +build !cgo !linux
// +build !linux

package buildah

user_linux.go (new file)
@@ -0,0 +1,235 @@
// +build linux

package buildah

import (
    "bufio"
    "flag"
    "fmt"
    "io"
    "os"
    "os/exec"
    "os/user"
    "strconv"
    "strings"
    "sync"

    "github.com/containers/storage/pkg/reexec"
    "github.com/sirupsen/logrus"
    "golang.org/x/sys/unix"
)

const (
    openChrootedCommand = Package + "-open"
)

func init() {
    reexec.Register(openChrootedCommand, openChrootedFileMain)
}

func openChrootedFileMain() {
    status := 0
    flag.Parse()
    if len(flag.Args()) < 1 {
        os.Exit(1)
    }
    // Our first parameter is the directory to chroot into.
    if err := unix.Chdir(flag.Arg(0)); err != nil {
        fmt.Fprintf(os.Stderr, "chdir(): %v", err)
        os.Exit(1)
    }
    if err := unix.Chroot(flag.Arg(0)); err != nil {
        fmt.Fprintf(os.Stderr, "chroot(): %v", err)
        os.Exit(1)
    }
    // Anything else is a file we want to dump out.
    for _, filename := range flag.Args()[1:] {
        f, err := os.Open(filename)
        if err != nil {
            fmt.Fprintf(os.Stderr, "open(%q): %v", filename, err)
            status = 1
            continue
        }
        _, err = io.Copy(os.Stdout, f)
        if err != nil {
            fmt.Fprintf(os.Stderr, "read(%q): %v", filename, err)
        }
        f.Close()
    }
    os.Exit(status)
}

func openChrootedFile(rootdir, filename string) (*exec.Cmd, io.ReadCloser, error) {
    // The child process expects a chroot and one or more filenames that
    // will be consulted relative to the chroot directory and concatenated
    // to its stdout. Start it up.
    cmd := reexec.Command(openChrootedCommand, rootdir, filename)
    stdout, err := cmd.StdoutPipe()
    if err != nil {
        return nil, nil, err
    }
    err = cmd.Start()
    if err != nil {
        return nil, nil, err
    }
    // Hand back the child's stdout for reading, and the child to reap.
    return cmd, stdout, nil
}

var (
    lookupUser, lookupGroup sync.Mutex
)

type lookupPasswdEntry struct {
    name string
    uid  uint64
    gid  uint64
}
type lookupGroupEntry struct {
    name string
    gid  uint64
}

func readWholeLine(rc *bufio.Reader) ([]byte, error) {
    line, isPrefix, err := rc.ReadLine()
    if err != nil {
        return nil, err
    }
    for isPrefix {
        // We didn't get a whole line. Keep reading chunks until we find an end of line, and discard them.
        for isPrefix {
            logrus.Debugf("discarding partial line %q", string(line))
            _, isPrefix, err = rc.ReadLine()
            if err != nil {
                return nil, err
            }
        }
        // That last read was the end of a line, so now we try to read the (beginning of?) the next line.
        line, isPrefix, err = rc.ReadLine()
        if err != nil {
            return nil, err
        }
    }
    return line, nil
}

func parseNextPasswd(rc *bufio.Reader) *lookupPasswdEntry {
    line, err := readWholeLine(rc)
    if err != nil {
        return nil
    }
    fields := strings.Split(string(line), ":")
    if len(fields) < 7 {
        return nil
    }
    uid, err := strconv.ParseUint(fields[2], 10, 32)
    if err != nil {
        return nil
    }
    gid, err := strconv.ParseUint(fields[3], 10, 32)
    if err != nil {
        return nil
    }
    return &lookupPasswdEntry{
        name: fields[0],
        uid:  uid,
        gid:  gid,
    }
}

func parseNextGroup(rc *bufio.Reader) *lookupGroupEntry {
    line, err := readWholeLine(rc)
    if err != nil {
        return nil
    }
    fields := strings.Split(string(line), ":")
    if len(fields) < 4 {
        return nil
    }
    gid, err := strconv.ParseUint(fields[2], 10, 32)
    if err != nil {
        return nil
    }
    return &lookupGroupEntry{
        name: fields[0],
        gid:  gid,
    }
}

func lookupUserInContainer(rootdir, username string) (uid uint64, gid uint64, err error) {
    cmd, f, err := openChrootedFile(rootdir, "/etc/passwd")
    if err != nil {
        return 0, 0, err
    }
    defer func() {
        _ = cmd.Wait()
    }()
    rc := bufio.NewReader(f)
    defer f.Close()

    lookupUser.Lock()
    defer lookupUser.Unlock()

    pwd := parseNextPasswd(rc)
    for pwd != nil {
        if pwd.name != username {
            pwd = parseNextPasswd(rc)
            continue
        }
        return pwd.uid, pwd.gid, nil
    }

    return 0, 0, user.UnknownUserError(fmt.Sprintf("error looking up user %q", username))
}

func lookupGroupForUIDInContainer(rootdir string, userid uint64) (username string, gid uint64, err error) {
    cmd, f, err := openChrootedFile(rootdir, "/etc/passwd")
    if err != nil {
        return "", 0, err
    }
    defer func() {
        _ = cmd.Wait()
    }()
    rc := bufio.NewReader(f)
    defer f.Close()

    lookupUser.Lock()
    defer lookupUser.Unlock()

    pwd := parseNextPasswd(rc)
    for pwd != nil {
        if pwd.uid != userid {
            pwd = parseNextPasswd(rc)
            continue
        }
        return pwd.name, pwd.gid, nil
    }

    return "", 0, user.UnknownUserError(fmt.Sprintf("error looking up user with UID %d", userid))
}

func lookupGroupInContainer(rootdir, groupname string) (gid uint64, err error) {
    cmd, f, err := openChrootedFile(rootdir, "/etc/group")
    if err != nil {
        return 0, err
    }
    defer func() {
        _ = cmd.Wait()
    }()
    rc := bufio.NewReader(f)
    defer f.Close()

    lookupGroup.Lock()
    defer lookupGroup.Unlock()

    grp := parseNextGroup(rc)
    for grp != nil {
        if grp.name != groupname {
            grp = parseNextGroup(rc)
            continue
        }
        return grp.gid, nil
    }

    return 0, user.UnknownGroupError(fmt.Sprintf("error looking up group %q", groupname))
}

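user_linux.go leans on the reexec helper from containers/storage to run the chrooted file reader in a child copy of the current binary. A minimal sketch of that pattern follows, assuming a hypothetical handler name ("demo-cat") and file path; it is not buildah code, just an illustration of how Register, Init, and Command fit together.

```go
// reexec_sketch.go - hedged illustration of the containers/storage reexec pattern.
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// Register a handler under a well-known name; reexec.Init() dispatches
	// to it when the process is re-executed with that name as argv[0].
	reexec.Register("demo-cat", func() {
		for _, path := range os.Args[1:] {
			f, err := os.Open(path)
			if err != nil {
				fmt.Fprintf(os.Stderr, "open(%q): %v\n", path, err)
				os.Exit(1)
			}
			io.Copy(os.Stdout, f)
			f.Close()
		}
		os.Exit(0)
	})
}

func main() {
	// In the child, Init() runs the registered handler and returns true.
	if reexec.Init() {
		return
	}
	// In the parent, spawn the child and read whatever it writes to stdout.
	cmd := reexec.Command("demo-cat", "/etc/hostname")
	out, err := cmd.Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("child wrote: %s", out)
}
```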
124
user_unix_cgo.go
@@ -1,124 +0,0 @@
|
||||
// +build cgo
|
||||
// +build linux
|
||||
|
||||
package buildah
|
||||
|
||||
// #include <sys/types.h>
|
||||
// #include <grp.h>
|
||||
// #include <pwd.h>
|
||||
// #include <stdlib.h>
|
||||
// #include <stdio.h>
|
||||
// #include <string.h>
|
||||
// typedef FILE * pFILE;
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func fopenContainerFile(rootdir, filename string) (C.pFILE, error) {
|
||||
var st, lst syscall.Stat_t
|
||||
|
||||
ctrfile := filepath.Join(rootdir, filename)
|
||||
cctrfile := C.CString(ctrfile)
|
||||
defer C.free(unsafe.Pointer(cctrfile))
|
||||
mode := C.CString("r")
|
||||
defer C.free(unsafe.Pointer(mode))
|
||||
f, err := C.fopen(cctrfile, mode)
|
||||
if f == nil || err != nil {
|
||||
return nil, errors.Wrapf(err, "error opening %q", ctrfile)
|
||||
}
|
||||
if err = syscall.Fstat(int(C.fileno(f)), &st); err != nil {
|
||||
return nil, errors.Wrapf(err, "fstat(%q)", ctrfile)
|
||||
}
|
||||
if err = syscall.Lstat(ctrfile, &lst); err != nil {
|
||||
return nil, errors.Wrapf(err, "lstat(%q)", ctrfile)
|
||||
}
|
||||
if st.Dev != lst.Dev || st.Ino != lst.Ino {
|
||||
return nil, errors.Errorf("%q is not a regular file", ctrfile)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
var (
|
||||
lookupUser, lookupGroup sync.Mutex
|
||||
)
|
||||
|
||||
func lookupUserInContainer(rootdir, username string) (uint64, uint64, error) {
|
||||
name := C.CString(username)
|
||||
defer C.free(unsafe.Pointer(name))
|
||||
|
||||
f, err := fopenContainerFile(rootdir, "/etc/passwd")
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
defer C.fclose(f)
|
||||
|
||||
lookupUser.Lock()
|
||||
defer lookupUser.Unlock()
|
||||
|
||||
pwd := C.fgetpwent(f)
|
||||
for pwd != nil {
|
||||
if C.strcmp(pwd.pw_name, name) != 0 {
|
||||
pwd = C.fgetpwent(f)
|
||||
continue
|
||||
}
|
||||
return uint64(pwd.pw_uid), uint64(pwd.pw_gid), nil
|
||||
}
|
||||
|
||||
return 0, 0, user.UnknownUserError(fmt.Sprintf("error looking up user %q", username))
|
||||
}
|
||||
|
||||
func lookupGroupForUIDInContainer(rootdir string, userid uint64) (string, uint64, error) {
|
||||
f, err := fopenContainerFile(rootdir, "/etc/passwd")
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
defer C.fclose(f)
|
||||
|
||||
lookupUser.Lock()
|
||||
defer lookupUser.Unlock()
|
||||
|
||||
pwd := C.fgetpwent(f)
|
||||
for pwd != nil {
|
||||
if uint64(pwd.pw_uid) != userid {
|
||||
pwd = C.fgetpwent(f)
|
||||
continue
|
||||
}
|
||||
return C.GoString(pwd.pw_name), uint64(pwd.pw_gid), nil
|
||||
}
|
||||
|
||||
return "", 0, user.UnknownUserError(fmt.Sprintf("error looking up user with UID %d", userid))
|
||||
}
|
||||
|
||||
func lookupGroupInContainer(rootdir, groupname string) (uint64, error) {
|
||||
name := C.CString(groupname)
|
||||
defer C.free(unsafe.Pointer(name))
|
||||
|
||||
f, err := fopenContainerFile(rootdir, "/etc/group")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer C.fclose(f)
|
||||
|
||||
lookupGroup.Lock()
|
||||
defer lookupGroup.Unlock()
|
||||
|
||||
grp := C.fgetgrent(f)
|
||||
for grp != nil {
|
||||
if C.strcmp(grp.gr_name, name) != 0 {
|
||||
grp = C.fgetgrent(f)
|
||||
continue
|
||||
}
|
||||
return uint64(grp.gr_gid), nil
|
||||
}
|
||||
|
||||
return 0, user.UnknownGroupError(fmt.Sprintf("error looking up group %q", groupname))
|
||||
}
|
||||
8
util.go
@@ -1,9 +1,17 @@
|
||||
package buildah
|
||||
|
||||
import (
|
||||
"github.com/containers/storage/pkg/chrootarchive"
|
||||
"github.com/containers/storage/pkg/reexec"
|
||||
)
|
||||
|
||||
var (
|
||||
// CopyWithTar defines the copy method to use.
|
||||
copyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar
|
||||
copyFileWithTar = chrootarchive.NewArchiver(nil).CopyFileWithTar
|
||||
untarPath = chrootarchive.NewArchiver(nil).UntarPath
|
||||
)
|
||||
|
||||
// InitReexec is a wrapper for reexec.Init(). It should be called at
|
||||
// the start of main(), and if it returns true, main() should return
|
||||
// immediately.
|
||||
|
||||
33
vendor.conf
@@ -1,14 +1,15 @@
|
||||
github.com/BurntSushi/toml master
|
||||
github.com/Nvveen/Gotty master
|
||||
github.com/blang/semver master
|
||||
github.com/containers/image 106607808da3cff168be56821e994611c919d283
|
||||
github.com/containers/storage 5d8c2f87387fa5be9fa526ae39fbd79b8bdf27be
|
||||
github.com/docker/distribution master
|
||||
github.com/docker/docker 0f9ec7e47072b0c2e954b5b821bde5c1fe81bfa7
|
||||
github.com/containers/image f950aa3529148eb0dea90888c24b6682da641b13
|
||||
github.com/containers/storage d7921c6facc516358070a1306689eda18adaa20a
|
||||
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
|
||||
github.com/docker/docker 30eb4d8cdc422b023d5f11f29a82ecb73554183b
|
||||
github.com/docker/engine-api master
|
||||
github.com/docker/go-connections e15c02316c12de00874640cd76311849de2aeed5
|
||||
github.com/docker/go-units master
|
||||
github.com/docker/libtrust master
|
||||
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
|
||||
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
|
||||
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
|
||||
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
|
||||
github.com/fsouza/go-dockerclient master
|
||||
github.com/ghodss/yaml master
|
||||
github.com/golang/glog master
|
||||
@@ -19,23 +20,23 @@ github.com/imdario/mergo master
|
||||
github.com/mattn/go-runewidth master
|
||||
github.com/mattn/go-shellwords master
|
||||
github.com/mistifyio/go-zfs master
|
||||
github.com/moby/moby 0f9ec7e47072b0c2e954b5b821bde5c1fe81bfa7
|
||||
github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c
|
||||
github.com/mtrmac/gpgme master
|
||||
github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
|
||||
github.com/opencontainers/image-spec v1.0.0
|
||||
github.com/opencontainers/runc master
|
||||
github.com/opencontainers/runtime-spec v1.0.0
|
||||
github.com/opencontainers/runtime-tools 2d270b8764c02228eeb13e36f076f5ce6f2e3591
|
||||
github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
|
||||
github.com/opencontainers/runtime-tools master
|
||||
github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd
|
||||
github.com/openshift/imagebuilder master
|
||||
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
|
||||
github.com/pborman/uuid master
|
||||
github.com/pkg/errors master
|
||||
github.com/Sirupsen/logrus master
|
||||
github.com/sirupsen/logrus master
|
||||
github.com/syndtr/gocapability master
|
||||
github.com/tchap/go-patricia master
|
||||
github.com/urfave/cli master
|
||||
github.com/vbatts/tar-split master
|
||||
github.com/vbatts/tar-split v0.10.2
|
||||
golang.org/x/crypto master
|
||||
golang.org/x/net master
|
||||
golang.org/x/sys master
|
||||
@@ -45,3 +46,11 @@ gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
|
||||
k8s.io/apimachinery master
|
||||
k8s.io/client-go master
|
||||
k8s.io/kubernetes master
|
||||
github.com/hashicorp/go-multierror master
|
||||
github.com/hashicorp/errwrap master
|
||||
github.com/xeipuuv/gojsonschema master
|
||||
github.com/xeipuuv/gojsonreference master
|
||||
github.com/containerd/continuity master
|
||||
github.com/gogo/protobuf master
|
||||
github.com/xeipuuv/gojsonpointer master
|
||||
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
|
||||
|
||||
10
vendor/github.com/Sirupsen/logrus/terminal_appengine.go
generated
vendored
@@ -1,10 +0,0 @@
|
||||
// +build appengine
|
||||
|
||||
package logrus
|
||||
|
||||
import "io"
|
||||
|
||||
// IsTerminal returns true if stderr's file descriptor is a terminal.
|
||||
func IsTerminal(f io.Writer) bool {
|
||||
return true
|
||||
}
|
||||
10
vendor/github.com/Sirupsen/logrus/terminal_bsd.go
generated
vendored
@@ -1,10 +0,0 @@
|
||||
// +build darwin freebsd openbsd netbsd dragonfly
|
||||
// +build !appengine
|
||||
|
||||
package logrus
|
||||
|
||||
import "syscall"
|
||||
|
||||
const ioctlReadTermios = syscall.TIOCGETA
|
||||
|
||||
type Termios syscall.Termios
|
||||
28
vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
generated
vendored
@@ -1,28 +0,0 @@
|
||||
// Based on ssh/terminal:
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux darwin freebsd openbsd netbsd dragonfly
|
||||
// +build !appengine
|
||||
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// IsTerminal returns true if stderr's file descriptor is a terminal.
|
||||
func IsTerminal(f io.Writer) bool {
|
||||
var termios Termios
|
||||
switch v := f.(type) {
|
||||
case *os.File:
|
||||
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
|
||||
return err == 0
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
21
vendor/github.com/Sirupsen/logrus/terminal_solaris.go
generated
vendored
@@ -1,21 +0,0 @@
|
||||
// +build solaris,!appengine
|
||||
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// IsTerminal returns true if the given file descriptor is a terminal.
|
||||
func IsTerminal(f io.Writer) bool {
|
||||
switch v := f.(type) {
|
||||
case *os.File:
|
||||
_, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
|
||||
return err == nil
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
33
vendor/github.com/Sirupsen/logrus/terminal_windows.go
generated
vendored
@@ -1,33 +0,0 @@
|
||||
// Based on ssh/terminal:
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build windows,!appengine
|
||||
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
|
||||
var (
|
||||
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
|
||||
)
|
||||
|
||||
// IsTerminal returns true if stderr's file descriptor is a terminal.
|
||||
func IsTerminal(f io.Writer) bool {
|
||||
switch v := f.(type) {
|
||||
case *os.File:
|
||||
var st uint32
|
||||
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
|
||||
return r != 0 && e == 0
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
5
vendor/github.com/containers/image/README.md
generated
vendored
@@ -62,9 +62,8 @@ or use the build tags described below to avoid the dependencies (e.g. using `go
|
||||
|
||||
- `containers_image_openpgp`: Use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation;
|
||||
the primary downside is that creating new signatures with the Golang-only implementation is not supported.
|
||||
- `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries.
|
||||
|
||||
(Note that explicitly importing `github.com/containers/image/ostree` will still depend on the `libostree` library, this build tag only affects generic users of …`/alltransports`.)
|
||||
- `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled
|
||||
and impossible to import when this build tag is in use.
|
||||
|
||||
## Contributing
|
||||
|
||||
|
||||
18
vendor/github.com/containers/image/copy/copy.go
generated
vendored
@@ -3,6 +3,7 @@ package copy
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -11,9 +12,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
pb "gopkg.in/cheggaaa/pb.v1"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/image/image"
|
||||
"github.com/containers/image/pkg/compression"
|
||||
"github.com/containers/image/signature"
|
||||
@@ -21,6 +19,8 @@ import (
|
||||
"github.com/containers/image/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
pb "gopkg.in/cheggaaa/pb.v1"
|
||||
)
|
||||
|
||||
type digestingReader struct {
|
||||
@@ -94,6 +94,8 @@ type Options struct {
|
||||
DestinationCtx *types.SystemContext
|
||||
ProgressInterval time.Duration // time to wait between reports to signal the progress channel
|
||||
Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
|
||||
// manifest MIME type of image set by user. "" is default and means use autodetection for the manifest MIME type
|
||||
ForceManifestMIMEType string
|
||||
}
|
||||
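The new ForceManifestMIMEType field lets a caller pin the destination manifest format instead of relying on autodetection from the destination's supported MIME types. Below is a hedged sketch of how a caller might set it when invoking copy.Image; the image references and the accept-anything policy are placeholders for illustration, not a recommendation.

```go
// force_manifest_sketch.go - hedged example of copy.Options.ForceManifestMIMEType.
package main

import (
	"log"

	"github.com/containers/image/copy"
	"github.com/containers/image/manifest"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports/alltransports"
)

func main() {
	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	destRef, err := alltransports.ParseImageName("dir:/tmp/busybox")
	if err != nil {
		log.Fatal(err)
	}

	// Accept-anything signature policy, used here only to keep the sketch short.
	policy := &signature.Policy{Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()}}
	policyCtx, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatal(err)
	}
	defer policyCtx.Destroy()

	// Force the destination manifest to Docker schema 2 rather than letting
	// the library autodetect from the destination's supported MIME types.
	err = copy.Image(policyCtx, destRef, srcRef, &copy.Options{
		ForceManifestMIMEType: manifest.DockerV2Schema2MediaType,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```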
|
||||
// Image copies image from srcRef to destRef, using policyContext to validate
|
||||
@@ -128,9 +130,7 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
|
||||
}
|
||||
}()
|
||||
|
||||
destSupportedManifestMIMETypes := dest.SupportedManifestMIMETypes()
|
||||
|
||||
rawSource, err := srcRef.NewImageSource(options.SourceCtx, destSupportedManifestMIMETypes)
|
||||
rawSource, err := srcRef.NewImageSource(options.SourceCtx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
|
||||
}
|
||||
@@ -171,7 +171,7 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
|
||||
sigs = [][]byte{}
|
||||
} else {
|
||||
writeReport("Getting image source signatures\n")
|
||||
s, err := src.Signatures()
|
||||
s, err := src.Signatures(context.TODO())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading signatures")
|
||||
}
|
||||
@@ -194,7 +194,7 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
|
||||
|
||||
// We compute preferredManifestMIMEType only to show it in error messages.
|
||||
// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
|
||||
preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := determineManifestConversion(&manifestUpdates, src, destSupportedManifestMIMETypes, canModifyManifest)
|
||||
preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := determineManifestConversion(&manifestUpdates, src, dest.SupportedManifestMIMETypes(), canModifyManifest, options.ForceManifestMIMEType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -582,7 +582,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
|
||||
bar.ShowPercent = false
|
||||
bar.Start()
|
||||
destStream = bar.NewProxyReader(destStream)
|
||||
defer fmt.Fprint(ic.reportWriter, "\n")
|
||||
defer bar.Finish()
|
||||
|
||||
// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
|
||||
var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
|
||||
|
||||
8
vendor/github.com/containers/image/copy/manifest.go
generated
vendored
@@ -3,10 +3,10 @@ package copy
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
|
||||
@@ -41,12 +41,16 @@ func (os *orderedSet) append(s string) {
|
||||
// Note that the conversion will only happen later, through src.UpdatedImage
|
||||
// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
|
||||
// and a list of other possible alternatives, in order.
|
||||
func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, src types.Image, destSupportedManifestMIMETypes []string, canModifyManifest bool) (string, []string, error) {
|
||||
func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, src types.Image, destSupportedManifestMIMETypes []string, canModifyManifest bool, forceManifestMIMEType string) (string, []string, error) {
|
||||
_, srcType, err := src.Manifest()
|
||||
if err != nil { // This should have been cached?!
|
||||
return "", nil, errors.Wrap(err, "Error reading manifest")
|
||||
}
|
||||
|
||||
if forceManifestMIMEType != "" {
|
||||
destSupportedManifestMIMETypes = []string{forceManifestMIMEType}
|
||||
}
|
||||
|
||||
if len(destSupportedManifestMIMETypes) == 0 {
|
||||
return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions.
|
||||
}
|
||||
|
||||
104
vendor/github.com/containers/image/directory/directory_dest.go
generated
vendored
@@ -4,19 +4,77 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containers/image/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const version = "Directory Transport Version: 1.0\n"
|
||||
|
||||
// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created
|
||||
// using the 'dir' transport
|
||||
var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data")
|
||||
|
||||
type dirImageDestination struct {
|
||||
ref dirReference
|
||||
ref dirReference
|
||||
compress bool
|
||||
}
|
||||
|
||||
// newImageDestination returns an ImageDestination for writing to an existing directory.
|
||||
func newImageDestination(ref dirReference) types.ImageDestination {
|
||||
return &dirImageDestination{ref}
|
||||
// newImageDestination returns an ImageDestination for writing to a directory.
|
||||
func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) {
|
||||
d := &dirImageDestination{ref: ref, compress: compress}
|
||||
|
||||
// If directory exists check if it is empty
|
||||
// if not empty, check whether the contents match that of a container image directory and overwrite the contents
|
||||
// if the contents don't match throw an error
|
||||
dirExists, err := pathExists(d.ref.resolvedPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath)
|
||||
}
|
||||
if dirExists {
|
||||
isEmpty, err := isDirEmpty(d.ref.resolvedPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !isEmpty {
|
||||
versionExists, err := pathExists(d.ref.versionPath())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath())
|
||||
}
|
||||
if versionExists {
|
||||
contents, err := ioutil.ReadFile(d.ref.versionPath())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// check if contents of version file is what we expect it to be
|
||||
if string(contents) != version {
|
||||
return nil, ErrNotContainerImageDir
|
||||
}
|
||||
} else {
|
||||
return nil, ErrNotContainerImageDir
|
||||
}
|
||||
// delete directory contents so that only one image is in the directory at a time
|
||||
if err = removeDirContents(d.ref.resolvedPath); err != nil {
|
||||
return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath)
|
||||
}
|
||||
logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath)
|
||||
}
|
||||
} else {
|
||||
// create directory if it doesn't exist
|
||||
if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath)
|
||||
}
|
||||
}
|
||||
// create version file
|
||||
err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0755)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath())
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
|
||||
@@ -42,7 +100,7 @@ func (d *dirImageDestination) SupportsSignatures() error {
|
||||
|
||||
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
|
||||
func (d *dirImageDestination) ShouldCompressLayers() bool {
|
||||
return false
|
||||
return d.compress
|
||||
}
|
||||
|
||||
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
|
||||
@@ -147,3 +205,39 @@ func (d *dirImageDestination) PutSignatures(signatures [][]byte) error {
|
||||
func (d *dirImageDestination) Commit() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// returns true if path exists
|
||||
func pathExists(path string) (bool, error) {
|
||||
_, err := os.Stat(path)
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// returns true if directory is empty
|
||||
func isDirEmpty(path string) (bool, error) {
|
||||
files, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(files) == 0, nil
|
||||
}
|
||||
|
||||
// deletes the contents of a directory
|
||||
func removeDirContents(path string) error {
|
||||
files, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
3
vendor/github.com/containers/image/directory/directory_src.go
generated
vendored
@@ -1,6 +1,7 @@
|
||||
package directory
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@@ -59,7 +60,7 @@ func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err
|
||||
return r, fi.Size(), nil
|
||||
}
|
||||
|
||||
func (s *dirImageSource) GetSignatures() ([][]byte, error) {
|
||||
func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
|
||||
signatures := [][]byte{}
|
||||
for i := 0; ; i++ {
|
||||
signature, err := ioutil.ReadFile(s.ref.signaturePath(i))
|
||||
|
||||
17
vendor/github.com/containers/image/directory/directory_transport.go
generated
vendored
@@ -143,18 +143,20 @@ func (ref dirReference) NewImage(ctx *types.SystemContext) (types.Image, error)
|
||||
return image.FromSource(src)
|
||||
}
|
||||
|
||||
// NewImageSource returns a types.ImageSource for this reference,
|
||||
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
|
||||
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
|
||||
// NewImageSource returns a types.ImageSource for this reference.
|
||||
// The caller must call .Close() on the returned ImageSource.
|
||||
func (ref dirReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
|
||||
func (ref dirReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
|
||||
return newImageSource(ref), nil
|
||||
}
|
||||
|
||||
// NewImageDestination returns a types.ImageDestination for this reference.
|
||||
// The caller must call .Close() on the returned ImageDestination.
|
||||
func (ref dirReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
|
||||
return newImageDestination(ref), nil
|
||||
compress := false
|
||||
if ctx != nil {
|
||||
compress = ctx.DirForceCompress
|
||||
}
|
||||
return newImageDestination(ref, compress)
|
||||
}
|
||||
|
||||
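With this change the dir: transport honours a new DirForceCompress knob on the SystemContext and guards existing directories with a version file. A small sketch of how a caller might opt into compression follows; the path is a placeholder and the snippet assumes the containers/image revision vendored in this diff.

```go
// dir_compress_sketch.go - hedged example of SystemContext.DirForceCompress.
package main

import (
	"log"

	"github.com/containers/image/transports/alltransports"
	"github.com/containers/image/types"
)

func main() {
	ref, err := alltransports.ParseImageName("dir:/tmp/exported-image")
	if err != nil {
		log.Fatal(err)
	}

	// Ask the dir: destination to compress layer blobs it writes.
	ctx := &types.SystemContext{DirForceCompress: true}

	// The destination now writes a "version" file and refuses to clobber a
	// non-empty directory that is not already a containers image directory.
	dest, err := ref.NewImageDestination(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer dest.Close()

	log.Printf("layers will be compressed: %v", dest.ShouldCompressLayers())
}
```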
// DeleteImage deletes the named image from the registry, if supported.
|
||||
@@ -177,3 +179,8 @@ func (ref dirReference) layerPath(digest digest.Digest) string {
|
||||
func (ref dirReference) signaturePath(index int) string {
|
||||
return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
|
||||
}
|
||||
|
||||
// versionPath returns a path for the version file within a directory using our conventions.
|
||||
func (ref dirReference) versionPath() string {
|
||||
return filepath.Join(ref.path, "version")
|
||||
}
|
||||
|
||||
2
vendor/github.com/containers/image/docker/archive/src.go
generated
vendored
@@ -1,9 +1,9 @@
|
||||
package archive
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/image/docker/tarfile"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type archiveImageSource struct {
|
||||
|
||||
6
vendor/github.com/containers/image/docker/archive/transport.go
generated
vendored
@@ -134,11 +134,9 @@ func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.Image, err
|
||||
return ctrImage.FromSource(src)
|
||||
}
|
||||
|
||||
// NewImageSource returns a types.ImageSource for this reference,
|
||||
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
|
||||
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
|
||||
// NewImageSource returns a types.ImageSource for this reference.
|
||||
// The caller must call .Close() on the returned ImageSource.
|
||||
func (ref archiveReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
|
||||
func (ref archiveReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
|
||||
return newImageSource(ctx, ref), nil
|
||||
}
|
||||
|
||||
|
||||
69
vendor/github.com/containers/image/docker/daemon/client.go
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containers/image/types"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
)
|
||||
|
||||
const (
|
||||
// The default API version to be used in case none is explicitly specified
|
||||
defaultAPIVersion = "1.22"
|
||||
)
|
||||
|
||||
// NewDockerClient initializes a new API client based on the passed SystemContext.
|
||||
func newDockerClient(ctx *types.SystemContext) (*dockerclient.Client, error) {
|
||||
host := dockerclient.DefaultDockerHost
|
||||
if ctx != nil && ctx.DockerDaemonHost != "" {
|
||||
host = ctx.DockerDaemonHost
|
||||
}
|
||||
|
||||
// Sadly, unix:// sockets don't work transparently with dockerclient.NewClient.
|
||||
// They work fine with a nil httpClient; with a non-nil httpClient, the transport’s
|
||||
// TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket
|
||||
// regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport.
|
||||
//
|
||||
// We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client.
|
||||
proto, _, _, err := dockerclient.ParseHost(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var httpClient *http.Client
|
||||
if proto != "unix" {
|
||||
hc, err := tlsConfig(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
httpClient = hc
|
||||
}
|
||||
|
||||
return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil)
|
||||
}
|
||||
|
||||
func tlsConfig(ctx *types.SystemContext) (*http.Client, error) {
|
||||
options := tlsconfig.Options{}
|
||||
if ctx != nil && ctx.DockerDaemonInsecureSkipTLSVerify {
|
||||
options.InsecureSkipVerify = true
|
||||
}
|
||||
|
||||
if ctx != nil && ctx.DockerDaemonCertPath != "" {
|
||||
options.CAFile = filepath.Join(ctx.DockerDaemonCertPath, "ca.pem")
|
||||
options.CertFile = filepath.Join(ctx.DockerDaemonCertPath, "cert.pem")
|
||||
options.KeyFile = filepath.Join(ctx.DockerDaemonCertPath, "key.pem")
|
||||
}
|
||||
|
||||
tlsc, err := tlsconfig.Client(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsc,
|
||||
},
|
||||
CheckRedirect: dockerclient.CheckRedirect,
|
||||
}, nil
|
||||
}
|
||||
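newDockerClient now builds the docker-daemon: client from the SystemContext, so the daemon host, TLS certificate directory, and TLS verification can be overridden per operation. The sketch below shows how those fields might be set; the host address, certificate path, and image name are assumptions for illustration only.

```go
// daemon_host_sketch.go - hedged example of the docker-daemon SystemContext fields.
package main

import (
	"log"

	"github.com/containers/image/transports/alltransports"
	"github.com/containers/image/types"
)

func main() {
	ref, err := alltransports.ParseImageName("docker-daemon:fedora-bashecho:latest")
	if err != nil {
		log.Fatal(err)
	}

	ctx := &types.SystemContext{
		DockerDaemonHost:     "tcp://10.0.0.5:2376", // assumed remote daemon address
		DockerDaemonCertPath: "/etc/docker/certs",   // assumed ca.pem/cert.pem/key.pem location
		// DockerDaemonInsecureSkipTLSVerify: true, // only for testing
	}

	src, err := ref.NewImageSource(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()
	log.Println("opened image from the daemon:", ref.StringWithinTransport())
}
```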
10
vendor/github.com/containers/image/docker/daemon/daemon_dest.go
generated
vendored
@@ -3,12 +3,12 @@ package daemon
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/docker/tarfile"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@@ -24,7 +24,7 @@ type daemonImageDestination struct {
|
||||
}
|
||||
|
||||
// newImageDestination returns a types.ImageDestination for the specified image reference.
|
||||
func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
|
||||
func newImageDestination(ctx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
|
||||
if ref.ref == nil {
|
||||
return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
|
||||
}
|
||||
@@ -33,7 +33,7 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t
|
||||
return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
|
||||
}
|
||||
|
||||
c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
|
||||
c, err := newDockerClient(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error initializing docker engine client")
|
||||
}
|
||||
@@ -42,8 +42,8 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t
|
||||
// Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it.
|
||||
statusChannel := make(chan error, 1)
|
||||
|
||||
ctx, goroutineCancel := context.WithCancel(context.Background())
|
||||
go imageLoadGoroutine(ctx, c, reader, statusChannel)
|
||||
goroutineContext, goroutineCancel := context.WithCancel(context.Background())
|
||||
go imageLoadGoroutine(goroutineContext, c, reader, statusChannel)
|
||||
|
||||
return &daemonImageDestination{
|
||||
ref: ref,
|
||||
|
||||
3
vendor/github.com/containers/image/docker/daemon/daemon_src.go
generated
vendored
@@ -7,7 +7,6 @@ import (
|
||||
|
||||
"github.com/containers/image/docker/tarfile"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
@@ -35,7 +34,7 @@ type layerInfo struct {
|
||||
// is the config, and that the following len(RootFS) files are the layers, but that feels
|
||||
// way too brittle.)
|
||||
func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
|
||||
c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
|
||||
c, err := newDockerClient(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error initializing docker engine client")
|
||||
}
|
||||
|
||||
6
vendor/github.com/containers/image/docker/daemon/daemon_transport.go
generated
vendored
@@ -161,11 +161,9 @@ func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.Image, erro
|
||||
return image.FromSource(src)
|
||||
}
|
||||
|
||||
// NewImageSource returns a types.ImageSource for this reference,
|
||||
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
|
||||
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
|
||||
// NewImageSource returns a types.ImageSource for this reference.
|
||||
// The caller must call .Close() on the returned ImageSource.
|
||||
func (ref daemonReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
|
||||
func (ref daemonReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
|
||||
return newImageSource(ctx, ref)
|
||||
}
|
||||
|
||||
|
||||
313
vendor/github.com/containers/image/docker/docker_client.go
generated
vendored
@@ -1,38 +1,31 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/pkg/docker/config"
|
||||
"github.com/containers/image/pkg/tlsclientconfig"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage/pkg/homedir"
|
||||
"github.com/docker/distribution/registry/client"
|
||||
"github.com/docker/go-connections/sockets"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
dockerHostname = "docker.io"
|
||||
dockerRegistry = "registry-1.docker.io"
|
||||
dockerAuthRegistry = "https://index.docker.io/v1/"
|
||||
|
||||
dockerCfg = ".docker"
|
||||
dockerCfgFileName = "config.json"
|
||||
dockerCfgObsolete = ".dockercfg"
|
||||
dockerHostname = "docker.io"
|
||||
dockerRegistry = "registry-1.docker.io"
|
||||
|
||||
systemPerHostCertDirPath = "/etc/docker/certs.d"
|
||||
|
||||
@@ -50,9 +43,13 @@ const (
|
||||
extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type
|
||||
)
|
||||
|
||||
// ErrV1NotSupported is returned when we're trying to talk to a
|
||||
// docker V1 registry.
|
||||
var ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
|
||||
var (
|
||||
// ErrV1NotSupported is returned when we're trying to talk to a
|
||||
// docker V1 registry.
|
||||
ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
|
||||
// ErrUnauthorizedForCredentials is returned when the status code returned is 401
|
||||
ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password")
|
||||
)
|
||||
|
||||
// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
|
||||
// signature represents a Docker image signature.
|
||||
@@ -111,27 +108,7 @@ func serverDefault() *tls.Config {
|
||||
}
|
||||
}
|
||||
|
||||
func newTransport() *http.Transport {
|
||||
direct := &net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
DualStack: true,
|
||||
}
|
||||
tr := &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: direct.Dial,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
// TODO(dmcgowan): Call close idle connections when complete and use keep alive
|
||||
DisableKeepAlives: true,
|
||||
}
|
||||
proxyDialer, err := sockets.DialerFromEnvironment(direct)
|
||||
if err == nil {
|
||||
tr.Dial = proxyDialer.Dial
|
||||
}
|
||||
return tr
|
||||
}
|
||||
|
||||
// dockerCertDir returns a path to a directory to be consumed by setupCertificates() depending on ctx and hostPort.
|
||||
// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
|
||||
func dockerCertDir(ctx *types.SystemContext, hostPort string) string {
|
||||
if ctx != nil && ctx.DockerCertPath != "" {
|
||||
return ctx.DockerCertPath
|
||||
@@ -147,131 +124,105 @@ func dockerCertDir(ctx *types.SystemContext, hostPort string) string {
|
||||
return filepath.Join(hostCertDir, hostPort)
|
||||
}
|
||||
|
||||
func setupCertificates(dir string, tlsc *tls.Config) error {
|
||||
logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
|
||||
fs, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for _, f := range fs {
|
||||
fullPath := filepath.Join(dir, f.Name())
|
||||
if strings.HasSuffix(f.Name(), ".crt") {
|
||||
systemPool, err := tlsconfig.SystemCertPool()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to get system cert pool")
|
||||
}
|
||||
tlsc.RootCAs = systemPool
|
||||
logrus.Debugf(" crt: %s", fullPath)
|
||||
data, err := ioutil.ReadFile(fullPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tlsc.RootCAs.AppendCertsFromPEM(data)
|
||||
}
|
||||
if strings.HasSuffix(f.Name(), ".cert") {
|
||||
certName := f.Name()
|
||||
keyName := certName[:len(certName)-5] + ".key"
|
||||
logrus.Debugf(" cert: %s", fullPath)
|
||||
if !hasFile(fs, keyName) {
|
||||
return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
|
||||
}
|
||||
cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tlsc.Certificates = append(tlsc.Certificates, cert)
|
||||
}
|
||||
if strings.HasSuffix(f.Name(), ".key") {
|
||||
keyName := f.Name()
|
||||
certName := keyName[:len(keyName)-4] + ".cert"
|
||||
logrus.Debugf(" key: %s", fullPath)
|
||||
if !hasFile(fs, certName) {
|
||||
return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func hasFile(files []os.FileInfo, name string) bool {
|
||||
for _, f := range files {
|
||||
if f.Name() == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// newDockerClient returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry)
|
||||
// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry)
|
||||
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
|
||||
func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
|
||||
func newDockerClientFromRef(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
|
||||
registry := reference.Domain(ref.ref)
|
||||
if registry == dockerHostname {
|
||||
registry = dockerRegistry
|
||||
username, password, err := config.GetAuthentication(ctx, reference.Domain(ref.ref))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error getting username and password")
|
||||
}
|
||||
username, password, err := getAuth(ctx, reference.Domain(ref.ref))
|
||||
sigBase, err := configuredSignatureStorageBase(ctx, ref, write)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tr := newTransport()
|
||||
remoteName := reference.Path(ref.ref)
|
||||
|
||||
return newDockerClientWithDetails(ctx, registry, username, password, actions, sigBase, remoteName)
|
||||
}
|
||||
|
||||
// newDockerClientWithDetails returns a new dockerClient instance for the given parameters
|
||||
func newDockerClientWithDetails(ctx *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) {
|
||||
hostName := registry
|
||||
if registry == dockerHostname {
|
||||
registry = dockerRegistry
|
||||
}
|
||||
tr := tlsclientconfig.NewTransport()
|
||||
tr.TLSClientConfig = serverDefault()
|
||||
|
||||
// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
|
||||
// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
|
||||
// dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
|
||||
// generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
|
||||
// undocumented and may change if docker/docker changes.
|
||||
certDir := dockerCertDir(ctx, reference.Domain(ref.ref))
|
||||
if err := setupCertificates(certDir, tr.TLSClientConfig); err != nil {
|
||||
certDir := dockerCertDir(ctx, hostName)
|
||||
if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ctx != nil && ctx.DockerInsecureSkipTLSVerify {
|
||||
tr.TLSClientConfig.InsecureSkipVerify = true
|
||||
}
|
||||
client := &http.Client{Transport: tr}
|
||||
|
||||
sigBase, err := configuredSignatureStorageBase(ctx, ref, write)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &dockerClient{
|
||||
ctx: ctx,
|
||||
registry: registry,
|
||||
username: username,
|
||||
password: password,
|
||||
client: client,
|
||||
client: &http.Client{Transport: tr},
|
||||
signatureBase: sigBase,
|
||||
scope: authScope{
|
||||
actions: actions,
|
||||
remoteName: reference.Path(ref.ref),
|
||||
remoteName: remoteName,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CheckAuth validates the credentials by attempting to log into the registry
|
||||
// returns an error if an error occurred while making the http request or the status code received was 401
|
||||
func CheckAuth(ctx context.Context, sCtx *types.SystemContext, username, password, registry string) error {
|
||||
newLoginClient, err := newDockerClientWithDetails(sCtx, registry, username, password, "", nil, "")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating new docker client")
|
||||
}
|
||||
|
||||
resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK:
|
||||
return nil
|
||||
case http.StatusUnauthorized:
|
||||
return ErrUnauthorizedForCredentials
|
||||
default:
|
||||
return errors.Errorf("error occured with status code %q", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
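CheckAuth gives callers a way to validate registry credentials up front by issuing a GET /v2/ request and mapping a 401 to ErrUnauthorizedForCredentials. A hedged usage sketch follows; the registry name and credentials are placeholders.

```go
// check_auth_sketch.go - hedged example of docker.CheckAuth.
package main

import (
	"context"
	"log"

	"github.com/containers/image/docker"
	"github.com/containers/image/types"
)

func main() {
	sysCtx := &types.SystemContext{}
	err := docker.CheckAuth(context.Background(), sysCtx, "myuser", "mypassword", "registry.example.com")
	switch err {
	case nil:
		log.Println("login succeeded")
	case docker.ErrUnauthorizedForCredentials:
		log.Println("invalid username/password")
	default:
		log.Fatalf("could not reach the registry: %v", err)
	}
}
```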
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
|
||||
// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
|
||||
func (c *dockerClient) makeRequest(method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
|
||||
if err := c.detectProperties(); err != nil {
|
||||
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
|
||||
if err := c.detectProperties(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
|
||||
return c.makeRequestToResolvedURL(method, url, headers, stream, -1, true)
|
||||
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, true)
|
||||
}
|
||||
|
||||
// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
|
||||
// streamLen, if not -1, specifies the length of the data expected on stream.
|
||||
// makeRequest should generally be preferred.
|
||||
// TODO(runcom): too many arguments here, use a struct
|
||||
func (c *dockerClient) makeRequestToResolvedURL(method, url string, headers map[string][]string, stream io.Reader, streamLen int64, sendAuth bool) (*http.Response, error) {
|
||||
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, sendAuth bool) (*http.Response, error) {
|
||||
req, err := http.NewRequest(method, url, stream)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
|
||||
req.ContentLength = streamLen
|
||||
}
|
||||
@@ -322,8 +273,11 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
            return errors.Errorf("missing realm in bearer auth challenge")
        }
        service, _ := challenge.Parameters["service"] // Will be "" if not present
        scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
        token, err := c.getBearerToken(realm, service, scope)
        var scope string
        if c.scope.remoteName != "" && c.scope.actions != "" {
            scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
        }
        token, err := c.getBearerToken(req.Context(), realm, service, scope)
        if err != nil {
            return err
        }
@@ -340,11 +294,12 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
    return nil
}

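A small sketch of the scope string assembled above: with remoteName "library/busybox" and actions "pull" the token request asks for "repository:library/busybox:pull", while an empty scope (e.g. a bare /v2/ ping during login) is now omitted entirely. The helper name is illustrative only.

package main

import "fmt"

func bearerScope(remoteName, actions string) string {
    if remoteName != "" && actions != "" {
        return fmt.Sprintf("repository:%s:%s", remoteName, actions)
    }
    return ""
}

func main() {
    fmt.Println(bearerScope("library/busybox", "pull")) // repository:library/busybox:pull
    fmt.Println(bearerScope("", "") == "")              // true – no scope parameter is sent
}
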
func (c *dockerClient) getBearerToken(realm, service, scope string) (*bearerToken, error) {
func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) {
    authReq, err := http.NewRequest("GET", realm, nil)
    if err != nil {
        return nil, err
    }
    authReq = authReq.WithContext(ctx)
    getParams := authReq.URL.Query()
    if service != "" {
        getParams.Add("service", service)
@@ -356,7 +311,7 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (*bearerToke
    if c.username != "" && c.password != "" {
        authReq.SetBasicAuth(c.username, c.password)
    }
    tr := newTransport()
    tr := tlsclientconfig.NewTransport()
    // TODO(runcom): insecure for now to contact the external token service
    tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
    client := &http.Client{Transport: tr}
@@ -367,7 +322,7 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (*bearerToke
    defer res.Body.Close()
    switch res.StatusCode {
    case http.StatusUnauthorized:
        return nil, errors.Errorf("unable to retrieve auth token: 401 unauthorized")
        return nil, ErrUnauthorizedForCredentials
    case http.StatusOK:
        break
    default:
@@ -391,70 +346,16 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (*bearerToke
    return &token, nil
}

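For reference, a minimal sketch of the token-endpoint URL that getBearerToken assembles from the WWW-Authenticate challenge: the realm plus optional service and scope query parameters. The realm, service and scope values below are made up.

package main

import (
    "fmt"
    "net/http"
)

func main() {
    req, err := http.NewRequest("GET", "https://auth.example.com/token", nil)
    if err != nil {
        panic(err)
    }
    q := req.URL.Query()
    q.Add("service", "registry.example.com")
    q.Add("scope", "repository:library/busybox:pull")
    req.URL.RawQuery = q.Encode()

    // e.g. https://auth.example.com/token?scope=repository%3Alibrary%2Fbusybox%3Apull&service=registry.example.com
    fmt.Println(req.URL.String())
}
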
func getAuth(ctx *types.SystemContext, registry string) (string, string, error) {
    if ctx != nil && ctx.DockerAuthConfig != nil {
        return ctx.DockerAuthConfig.Username, ctx.DockerAuthConfig.Password, nil
    }
    var dockerAuth dockerConfigFile
    dockerCfgPath := filepath.Join(getDefaultConfigDir(".docker"), dockerCfgFileName)
    if _, err := os.Stat(dockerCfgPath); err == nil {
        j, err := ioutil.ReadFile(dockerCfgPath)
        if err != nil {
            return "", "", err
        }
        if err := json.Unmarshal(j, &dockerAuth); err != nil {
            return "", "", err
        }

    } else if os.IsNotExist(err) {
        // try old config path
        oldDockerCfgPath := filepath.Join(getDefaultConfigDir(dockerCfgObsolete))
        if _, err := os.Stat(oldDockerCfgPath); err != nil {
            if os.IsNotExist(err) {
                return "", "", nil
            }
            return "", "", errors.Wrap(err, oldDockerCfgPath)
        }

        j, err := ioutil.ReadFile(oldDockerCfgPath)
        if err != nil {
            return "", "", err
        }
        if err := json.Unmarshal(j, &dockerAuth.AuthConfigs); err != nil {
            return "", "", err
        }

    } else if err != nil {
        return "", "", errors.Wrap(err, dockerCfgPath)
    }

    // I'm feeling lucky
    if c, exists := dockerAuth.AuthConfigs[registry]; exists {
        return decodeDockerAuth(c.Auth)
    }

    // bad luck; let's normalize the entries first
    registry = normalizeRegistry(registry)
    normalizedAuths := map[string]dockerAuthConfig{}
    for k, v := range dockerAuth.AuthConfigs {
        normalizedAuths[normalizeRegistry(k)] = v
    }
    if c, exists := normalizedAuths[registry]; exists {
        return decodeDockerAuth(c.Auth)
    }
    return "", "", nil
}

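A minimal sketch of the precedence getAuth implements: credentials supplied via the SystemContext win, then ~/.docker/config.json, then the legacy ~/.dockercfg. The helper below is illustrative only and does not parse the files.

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

func credentialSource(haveSystemContextAuth bool, home string) string {
    if haveSystemContextAuth {
        return "SystemContext.DockerAuthConfig"
    }
    if _, err := os.Stat(filepath.Join(home, ".docker", "config.json")); err == nil {
        return "~/.docker/config.json"
    }
    if _, err := os.Stat(filepath.Join(home, ".dockercfg")); err == nil {
        return "~/.dockercfg (legacy)"
    }
    return "no credentials found"
}

func main() {
    fmt.Println(credentialSource(false, os.Getenv("HOME")))
}
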
// detectProperties detects various properties of the registry.
// See the dockerClient documentation for members which are affected by this.
func (c *dockerClient) detectProperties() error {
func (c *dockerClient) detectProperties(ctx context.Context) error {
    if c.scheme != "" {
        return nil
    }

    ping := func(scheme string) error {
        url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
        resp, err := c.makeRequestToResolvedURL("GET", url, nil, nil, -1, true)
        resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
        logrus.Debugf("Ping %s err %#v", url, err)
        if err != nil {
            return err
@@ -481,7 +382,7 @@ func (c *dockerClient) detectProperties() error {
    // best effort to understand if we're talking to a V1 registry
    pingV1 := func(scheme string) bool {
        url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
        resp, err := c.makeRequestToResolvedURL("GET", url, nil, nil, -1, true)
        resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
        logrus.Debugf("Ping %s err %#v", url, err)
        if err != nil {
            return false
@@ -506,9 +407,9 @@ func (c *dockerClient) detectProperties() error {

// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
// using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
    path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
    res, err := c.makeRequest("GET", path, nil, nil)
    res, err := c.makeRequest(ctx, "GET", path, nil, nil)
    if err != nil {
        return nil, err
    }
@@ -527,55 +428,3 @@ func (c *dockerClient) getExtensionsSignatures(ref dockerReference, manifestDige
    }
    return &parsedBody, nil
}

func getDefaultConfigDir(confPath string) string {
    return filepath.Join(homedir.Get(), confPath)
}

type dockerAuthConfig struct {
    Auth string `json:"auth,omitempty"`
}

type dockerConfigFile struct {
    AuthConfigs map[string]dockerAuthConfig `json:"auths"`
}

func decodeDockerAuth(s string) (string, string, error) {
    decoded, err := base64.StdEncoding.DecodeString(s)
    if err != nil {
        return "", "", err
    }
    parts := strings.SplitN(string(decoded), ":", 2)
    if len(parts) != 2 {
        // if it's invalid just skip, as docker does
        return "", "", nil
    }
    user := parts[0]
    password := strings.Trim(parts[1], "\x00")
    return user, password, nil
}

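A small, self-contained example of the value decodeDockerAuth expects — a base64-encoded "user:password" pair — including the trailing-NUL trim performed above.

package main

import (
    "encoding/base64"
    "fmt"
    "strings"
)

func main() {
    // Encode a made-up credential pair the way config.json stores it.
    auth := base64.StdEncoding.EncodeToString([]byte("alice:s3cret\x00"))

    decoded, err := base64.StdEncoding.DecodeString(auth)
    if err != nil {
        panic(err)
    }
    parts := strings.SplitN(string(decoded), ":", 2)
    user, password := parts[0], strings.Trim(parts[1], "\x00")
    fmt.Printf("user=%q password=%q\n", user, password) // user="alice" password="s3cret"
}
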
// convertToHostname converts a registry url which has http|https prepended
// to just a hostname.
// Copied from github.com/docker/docker/registry/auth.go
func convertToHostname(url string) string {
    stripped := url
    if strings.HasPrefix(url, "http://") {
        stripped = strings.TrimPrefix(url, "http://")
    } else if strings.HasPrefix(url, "https://") {
        stripped = strings.TrimPrefix(url, "https://")
    }

    nameParts := strings.SplitN(stripped, "/", 2)

    return nameParts[0]
}

func normalizeRegistry(registry string) string {
    normalized := convertToHostname(registry)
    switch normalized {
    case "registry-1.docker.io", "docker.io":
        return "index.docker.io"
    }
    return normalized
}

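A condensed re-implementation, for illustration only, of how convertToHostname and normalizeRegistry fold different spellings of Docker Hub onto a single credential key.

package main

import (
    "fmt"
    "strings"
)

func convertToHostname(url string) string {
    stripped := strings.TrimPrefix(strings.TrimPrefix(url, "https://"), "http://")
    return strings.SplitN(stripped, "/", 2)[0]
}

func normalizeRegistry(registry string) string {
    switch n := convertToHostname(registry); n {
    case "registry-1.docker.io", "docker.io":
        return "index.docker.io"
    default:
        return n
    }
}

func main() {
    for _, r := range []string{"https://registry-1.docker.io/v2/", "docker.io", "registry.example.com:5000/foo"} {
        fmt.Println(r, "->", normalizeRegistry(r))
    }
    // all Docker Hub spellings map to index.docker.io; other registries keep their host:port
}
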
6
vendor/github.com/containers/image/docker/docker_image.go
generated
vendored
@@ -1,6 +1,7 @@
package docker

import (
    "context"
    "encoding/json"
    "fmt"
    "net/http"
@@ -22,7 +23,7 @@ type Image struct {
// a client to the registry hosting the given image.
// The caller must call .Close() on the returned Image.
func newImage(ctx *types.SystemContext, ref dockerReference) (types.Image, error) {
    s, err := newImageSource(ctx, ref, nil)
    s, err := newImageSource(ctx, ref)
    if err != nil {
        return nil, err
    }
@@ -41,7 +42,8 @@ func (i *Image) SourceRefFullName() string {
// GetRepositoryTags lists all tags available in the repository. Note that this has no connection with the tag(s) used for this specific image, if any.
func (i *Image) GetRepositoryTags() ([]string, error) {
    path := fmt.Sprintf(tagsPath, reference.Path(i.src.ref.ref))
    res, err := i.src.c.makeRequest("GET", path, nil, nil)
    // FIXME: Pass the context.Context
    res, err := i.src.c.makeRequest(context.TODO(), "GET", path, nil, nil)
    if err != nil {
        return nil, err
    }

54
vendor/github.com/containers/image/docker/docker_image_dest.go
generated
vendored
@@ -2,6 +2,7 @@ package docker

import (
    "bytes"
    "context"
    "crypto/rand"
    "encoding/json"
    "fmt"
@@ -12,7 +13,6 @@ import (
    "os"
    "path/filepath"

    "github.com/Sirupsen/logrus"
    "github.com/containers/image/docker/reference"
    "github.com/containers/image/manifest"
    "github.com/containers/image/types"
@@ -20,24 +20,11 @@ import (
    "github.com/docker/distribution/registry/api/v2"
    "github.com/docker/distribution/registry/client"
    "github.com/opencontainers/go-digest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

var manifestMIMETypes = []string{
    // TODO(runcom): we'll add OCI as part of another PR here
    manifest.DockerV2Schema2MediaType,
    manifest.DockerV2Schema1SignedMediaType,
    manifest.DockerV2Schema1MediaType,
}

func supportedManifestMIMETypesMap() map[string]bool {
    m := make(map[string]bool, len(manifestMIMETypes))
    for _, mt := range manifestMIMETypes {
        m[mt] = true
    }
    return m
}

type dockerImageDestination struct {
    ref dockerReference
    c   *dockerClient
@@ -47,7 +34,7 @@ type dockerImageDestination struct {

// newImageDestination creates a new ImageDestination for the specified image reference.
func newImageDestination(ctx *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
    c, err := newDockerClient(ctx, ref, true, "pull,push")
    c, err := newDockerClientFromRef(ctx, ref, true, "pull,push")
    if err != nil {
        return nil, err
    }
@@ -69,13 +56,18 @@ func (d *dockerImageDestination) Close() error {
}

func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
    return manifestMIMETypes
    return []string{
        imgspecv1.MediaTypeImageManifest,
        manifest.DockerV2Schema2MediaType,
        manifest.DockerV2Schema1SignedMediaType,
        manifest.DockerV2Schema1MediaType,
    }
}

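With the OCI media type added to the advertised list, a caller can check for it before deciding whether a manifest needs conversion. A hedged sketch: supportsOCI is a hypothetical helper, and the literal slice stands in for the answer of a real ImageDestination.

package main

import (
    "fmt"

    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func supportsOCI(supported []string) bool {
    for _, mt := range supported {
        if mt == imgspecv1.MediaTypeImageManifest {
            return true
        }
    }
    return false
}

func main() {
    supported := []string{
        imgspecv1.MediaTypeImageManifest,
        "application/vnd.docker.distribution.manifest.v2+json",
    }
    fmt.Println("destination accepts OCI manifests:", supportsOCI(supported))
}
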
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *dockerImageDestination) SupportsSignatures() error {
    if err := d.c.detectProperties(); err != nil {
    if err := d.c.detectProperties(context.TODO()); err != nil {
        return err
    }
    switch {
@@ -132,7 +124,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
    // FIXME? Chunked upload, progress reporting, etc.
    uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
    logrus.Debugf("Uploading %s", uploadPath)
    res, err := d.c.makeRequest("POST", uploadPath, nil, nil)
    res, err := d.c.makeRequest(context.TODO(), "POST", uploadPath, nil, nil)
    if err != nil {
        return types.BlobInfo{}, err
    }
@@ -149,7 +141,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
    digester := digest.Canonical.Digester()
    sizeCounter := &sizeCounter{}
    tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
    res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
    res, err = d.c.makeRequestToResolvedURL(context.TODO(), "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
    if err != nil {
        logrus.Debugf("Error uploading layer chunked, response %#v", res)
        return types.BlobInfo{}, err
@@ -168,7 +160,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
    // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
    locationQuery.Set("digest", computedDigest.String())
    uploadLocation.RawQuery = locationQuery.Encode()
    res, err = d.c.makeRequestToResolvedURL("PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, true)
    res, err = d.c.makeRequestToResolvedURL(context.TODO(), "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, true)
    if err != nil {
        return types.BlobInfo{}, err
    }
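The three requests above follow the registry blob-upload flow: POST to open an upload session, PATCH the data to the returned Location, then PUT with ?digest= to commit. A compact standalone sketch against a made-up registry; it assumes absolute Location headers and omits authentication and error-body handling.

package main

import (
    "bytes"
    "fmt"
    "net/http"
    "net/url"

    "github.com/opencontainers/go-digest"
)

func main() {
    layer := []byte("example layer contents")
    dgst := digest.Canonical.FromBytes(layer)

    base := "https://registry.example.com/v2/library/app"

    // 1. Open an upload session.
    res, err := http.Post(base+"/blobs/uploads/", "", nil)
    if err != nil {
        panic(err)
    }
    res.Body.Close()
    location := res.Header.Get("Location")

    // 2. Send the blob data to the session URL.
    req, err := http.NewRequest("PATCH", location, bytes.NewReader(layer))
    if err != nil {
        panic(err)
    }
    req.Header.Set("Content-Type", "application/octet-stream")
    res, err = http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    res.Body.Close()
    location = res.Header.Get("Location")

    // 3. Commit the upload under the digest computed client-side.
    u, err := url.Parse(location)
    if err != nil {
        panic(err)
    }
    q := u.Query()
    q.Set("digest", dgst.String())
    u.RawQuery = q.Encode()
    req, err = http.NewRequest("PUT", u.String(), nil)
    if err != nil {
        panic(err)
    }
    res, err = http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    res.Body.Close()
    fmt.Println("committed", dgst, "with status", res.StatusCode)
}
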
@@ -193,7 +185,7 @@ func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, erro
    checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())

    logrus.Debugf("Checking %s", checkPath)
    res, err := d.c.makeRequest("HEAD", checkPath, nil, nil)
    res, err := d.c.makeRequest(context.TODO(), "HEAD", checkPath, nil, nil)
    if err != nil {
        return false, -1, err
    }
@@ -239,12 +231,12 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
    if mimeType != "" {
        headers["Content-Type"] = []string{mimeType}
    }
    res, err := d.c.makeRequest("PUT", path, headers, bytes.NewReader(m))
    res, err := d.c.makeRequest(context.TODO(), "PUT", path, headers, bytes.NewReader(m))
    if err != nil {
        return err
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusCreated {
    if !successStatus(res.StatusCode) {
        err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest to %s", path)
        if isManifestInvalidError(errors.Cause(err)) {
            err = types.ManifestTypeRejectedError{Err: err}
@@ -254,6 +246,12 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
    return nil
}

// successStatus returns true if the argument is a successful HTTP response
// code (in the range 200 - 399 inclusive).
func successStatus(status int) bool {
    return status >= 200 && status <= 399
}

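A quick check of the new helper's semantics: 2xx and 3xx responses are treated as success, everything else is routed to HandleErrorResponse.

package main

import "fmt"

func successStatus(status int) bool {
    return status >= 200 && status <= 399
}

func main() {
    for _, code := range []int{200, 201, 202, 301, 400, 401, 500} {
        fmt.Println(code, successStatus(code))
    }
}
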
// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
func isManifestInvalidError(err error) bool {
    errors, ok := err.(errcode.Errors)
@@ -275,7 +273,7 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
    if len(signatures) == 0 {
        return nil
    }
    if err := d.c.detectProperties(); err != nil {
    if err := d.c.detectProperties(context.TODO()); err != nil {
        return err
    }
    switch {
@@ -396,7 +394,7 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(signatures [][]byte
    // always adds signatures. Eventually we should also allow removing signatures,
    // but the X-Registry-Supports-Signatures API extension does not support that yet.

    existingSignatures, err := d.c.getExtensionsSignatures(d.ref, d.manifestDigest)
    existingSignatures, err := d.c.getExtensionsSignatures(context.TODO(), d.ref, d.manifestDigest)
    if err != nil {
        return err
    }
@@ -438,7 +436,7 @@ sigExists:
    }

    path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
    res, err := d.c.makeRequest("PUT", path, nil, bytes.NewReader(body))
    res, err := d.c.makeRequest(context.TODO(), "PUT", path, nil, bytes.NewReader(body))
    if err != nil {
        return err
    }

107
vendor/github.com/containers/image/docker/docker_image_src.go
generated
vendored
@@ -1,6 +1,7 @@
package docker

import (
    "context"
    "fmt"
    "io"
    "io/ioutil"
@@ -10,51 +11,33 @@ import (
    "os"
    "strconv"

    "github.com/Sirupsen/logrus"
    "github.com/containers/image/docker/reference"
    "github.com/containers/image/manifest"
    "github.com/containers/image/types"
    "github.com/docker/distribution/registry/client"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

type dockerImageSource struct {
    ref                        dockerReference
    requestedManifestMIMETypes []string
    c                          *dockerClient
    ref dockerReference
    c   *dockerClient
    // State
    cachedManifest         []byte // nil if not loaded yet
    cachedManifestMIMEType string // Only valid if cachedManifest != nil
}

// newImageSource creates a new ImageSource for the specified image reference,
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
// newImageSource creates a new ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx *types.SystemContext, ref dockerReference, requestedManifestMIMETypes []string) (*dockerImageSource, error) {
    c, err := newDockerClient(ctx, ref, false, "pull")
func newImageSource(ctx *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
    c, err := newDockerClientFromRef(ctx, ref, false, "pull")
    if err != nil {
        return nil, err
    }
    if requestedManifestMIMETypes == nil {
        requestedManifestMIMETypes = manifest.DefaultRequestedManifestMIMETypes
    }
    supportedMIMEs := supportedManifestMIMETypesMap()
    acceptableRequestedMIMEs := false
    for _, mtrequested := range requestedManifestMIMETypes {
        if supportedMIMEs[mtrequested] {
            acceptableRequestedMIMEs = true
            break
        }
    }
    if !acceptableRequestedMIMEs {
        requestedManifestMIMETypes = manifest.DefaultRequestedManifestMIMETypes
    }
    return &dockerImageSource{
        ref: ref,
        requestedManifestMIMETypes: requestedManifestMIMETypes,
        c: c,
        c:   c,
    }, nil
}

@@ -85,18 +68,18 @@ func simplifyContentType(contentType string) string {
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
func (s *dockerImageSource) GetManifest() ([]byte, string, error) {
    err := s.ensureManifestIsLoaded()
    err := s.ensureManifestIsLoaded(context.TODO())
    if err != nil {
        return nil, "", err
    }
    return s.cachedManifest, s.cachedManifestMIMEType, nil
}

func (s *dockerImageSource) fetchManifest(tagOrDigest string) ([]byte, string, error) {
func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
    path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
    headers := make(map[string][]string)
    headers["Accept"] = s.requestedManifestMIMETypes
    res, err := s.c.makeRequest("GET", path, headers, nil)
    headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes
    res, err := s.c.makeRequest(ctx, "GET", path, headers, nil)
    if err != nil {
        return nil, "", err
    }
@@ -114,7 +97,7 @@ func (s *dockerImageSource) fetchManifest(tagOrDigest string) ([]byte, string, e
// GetTargetManifest returns an image's manifest given a digest.
// This is mainly used to retrieve a single image's manifest out of a manifest list.
func (s *dockerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
    return s.fetchManifest(digest.String())
    return s.fetchManifest(context.TODO(), digest.String())
}

// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
@@ -124,7 +107,7 @@ func (s *dockerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, str
// we need to ensure that the digest of the manifest returned by GetManifest
// and used by GetSignatures are consistent, otherwise we would get spurious
// signature verification failures when pulling while a tag is being updated.
func (s *dockerImageSource) ensureManifestIsLoaded() error {
func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
    if s.cachedManifest != nil {
        return nil
    }
@@ -134,7 +117,7 @@ func (s *dockerImageSource) ensureManifestIsLoaded() error {
        return err
    }

    manblob, mt, err := s.fetchManifest(reference)
    manblob, mt, err := s.fetchManifest(ctx, reference)
    if err != nil {
        return err
    }
@@ -150,13 +133,14 @@ func (s *dockerImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64
        err  error
    )
    for _, url := range urls {
        resp, err = s.c.makeRequestToResolvedURL("GET", url, nil, nil, -1, false)
        resp, err = s.c.makeRequestToResolvedURL(context.TODO(), "GET", url, nil, nil, -1, false)
        if err == nil {
            if resp.StatusCode != http.StatusOK {
                err = errors.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode)
                logrus.Debug(err)
                continue
            }
            break
        }
    }
    if resp.Body != nil && err == nil {
@@ -181,7 +165,7 @@ func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64,

    path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
    logrus.Debugf("Downloading %s", path)
    res, err := s.c.makeRequest("GET", path, nil, nil)
    res, err := s.c.makeRequest(context.TODO(), "GET", path, nil, nil)
    if err != nil {
        return nil, 0, err
    }
@@ -192,27 +176,38 @@ func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64,
    return res.Body, getBlobSize(res), nil
}

func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
    if err := s.c.detectProperties(); err != nil {
func (s *dockerImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
    if err := s.c.detectProperties(ctx); err != nil {
        return nil, err
    }
    switch {
    case s.c.signatureBase != nil:
        return s.getSignaturesFromLookaside()
        return s.getSignaturesFromLookaside(ctx)
    case s.c.supportsSignatures:
        return s.getSignaturesFromAPIExtension()
        return s.getSignaturesFromAPIExtension(ctx)
    default:
        return [][]byte{}, nil
    }
}

// manifestDigest returns a digest of the manifest, either from the supplied reference or from a fetched manifest.
func (s *dockerImageSource) manifestDigest(ctx context.Context) (digest.Digest, error) {
    if digested, ok := s.ref.ref.(reference.Digested); ok {
        d := digested.Digest()
        if d.Algorithm() == digest.Canonical {
            return d, nil
        }
    }
    if err := s.ensureManifestIsLoaded(ctx); err != nil {
        return "", err
    }
    return manifest.Digest(s.cachedManifest)
}

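The short-circuit above only trusts a digest that already uses the canonical algorithm; a tiny illustration with the go-digest package (the digest value is the well-known SHA-256 of the empty string).

package main

import (
    "fmt"

    "github.com/opencontainers/go-digest"
)

func main() {
    d, err := digest.Parse("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
    if err != nil {
        panic(err)
    }
    // When a reference is pinned by a digest like this one, manifestDigest
    // can return it directly instead of fetching and hashing the manifest.
    fmt.Println("canonical algorithm:", d.Algorithm() == digest.Canonical)
}
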
// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase,
// which is not nil.
func (s *dockerImageSource) getSignaturesFromLookaside() ([][]byte, error) {
    if err := s.ensureManifestIsLoaded(); err != nil {
        return nil, err
    }
    manifestDigest, err := manifest.Digest(s.cachedManifest)
func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context) ([][]byte, error) {
    manifestDigest, err := s.manifestDigest(ctx)
    if err != nil {
        return nil, err
    }
@@ -224,7 +219,7 @@ func (s *dockerImageSource) getSignaturesFromLookaside() ([][]byte, error) {
        if url == nil {
            return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
        }
        signature, missing, err := s.getOneSignature(url)
        signature, missing, err := s.getOneSignature(ctx, url)
        if err != nil {
            return nil, err
        }
@@ -239,7 +234,7 @@ func (s *dockerImageSource) getSignaturesFromLookaside() ([][]byte, error) {
// getOneSignature downloads one signature from url.
// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, missing bool, err error) {
func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) {
    switch url.Scheme {
    case "file":
        logrus.Debugf("Reading %s", url.Path)
@@ -254,7 +249,12 @@ func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, mis

    case "http", "https":
        logrus.Debugf("GET %s", url)
        res, err := s.c.client.Get(url.String())
        req, err := http.NewRequest("GET", url.String(), nil)
        if err != nil {
            return nil, false, err
        }
        req = req.WithContext(ctx)
        res, err := s.c.client.Do(req)
        if err != nil {
            return nil, false, err
        }
@@ -276,16 +276,13 @@ func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, mis
    }

// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension.
func (s *dockerImageSource) getSignaturesFromAPIExtension() ([][]byte, error) {
    if err := s.ensureManifestIsLoaded(); err != nil {
        return nil, err
    }
    manifestDigest, err := manifest.Digest(s.cachedManifest)
func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context) ([][]byte, error) {
    manifestDigest, err := s.manifestDigest(ctx)
    if err != nil {
        return nil, err
    }

    parsedBody, err := s.c.getExtensionsSignatures(s.ref, manifestDigest)
    parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest)
    if err != nil {
        return nil, err
    }
@@ -301,7 +298,7 @@ func (s *dockerImageSource) getSignaturesFromAPIExtension() ([][]byte, error) {

// deleteImage deletes the named image from the registry, if supported.
func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
    c, err := newDockerClient(ctx, ref, true, "push")
    c, err := newDockerClientFromRef(ctx, ref, true, "push")
    if err != nil {
        return err
    }
@@ -316,7 +313,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
        return err
    }
    getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
    get, err := c.makeRequest("GET", getPath, headers, nil)
    get, err := c.makeRequest(context.TODO(), "GET", getPath, headers, nil)
    if err != nil {
        return err
    }
@@ -338,7 +335,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {

    // When retrieving the digest from a registry >= 2.3 use the following header:
    //   "Accept": "application/vnd.docker.distribution.manifest.v2+json"
    delete, err := c.makeRequest("DELETE", deletePath, headers, nil)
    delete, err := c.makeRequest(context.TODO(), "DELETE", deletePath, headers, nil)
    if err != nil {
        return err
    }

8
vendor/github.com/containers/image/docker/docker_transport.go
generated
vendored
@@ -130,12 +130,10 @@ func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.Image, erro
    return newImage(ctx, ref)
}

// NewImageSource returns a types.ImageSource for this reference,
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref dockerReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
    return newImageSource(ctx, ref, requestedManifestMIMETypes)
func (ref dockerReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
    return newImageSource(ctx, ref)
}

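A minimal sketch, at this vendored API level, of the simplified call sequence once requestedManifestMIMETypes is gone: parse a docker:// reference, open the source with only a SystemContext, and read the manifest. The image name is made up.

package main

import (
    "fmt"

    "github.com/containers/image/docker"
    "github.com/containers/image/types"
)

func main() {
    ref, err := docker.ParseReference("//registry.example.com/app:latest")
    if err != nil {
        panic(err)
    }
    src, err := ref.NewImageSource(&types.SystemContext{})
    if err != nil {
        panic(err)
    }
    defer src.Close()

    manifestBytes, mimeType, err := src.GetManifest()
    if err != nil {
        panic(err)
    }
    fmt.Println("manifest type:", mimeType, "size:", len(manifestBytes))
}
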
// NewImageDestination returns a types.ImageDestination for this reference.

2
vendor/github.com/containers/image/docker/lookaside.go
generated
vendored
@@ -9,12 +9,12 @@ import (
    "path/filepath"
    "strings"

    "github.com/Sirupsen/logrus"
    "github.com/containers/image/docker/reference"
    "github.com/containers/image/types"
    "github.com/ghodss/yaml"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.