Compare commits
255 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e073df11aa | ||
|
|
8badcc2d02 | ||
|
|
a586779353 | ||
|
|
4eb654f10c | ||
|
|
f29314579d | ||
|
|
9e20c3d948 | ||
|
|
46c1a54b15 | ||
|
|
67b565da7b | ||
|
|
dd4a6aea97 | ||
|
|
9116598a2e | ||
|
|
df2a10d43f | ||
|
|
e9915937ac | ||
|
|
5a9c591abf | ||
|
|
531ef9159d | ||
|
|
811cf927d7 | ||
|
|
032b56ee8d | ||
|
|
6af847dd2a | ||
|
|
6b207f7b0c | ||
|
|
c14697ebe4 | ||
|
|
c35493248e | ||
|
|
d03a894969 | ||
|
|
fbb8b702bc | ||
|
|
815cedfc71 | ||
|
|
1c97f6ac2c | ||
|
|
bc9d574c10 | ||
|
|
c84db980ae | ||
|
|
85a37b39e8 | ||
|
|
49095a83f8 | ||
|
|
6c05a352df | ||
|
|
1849466827 | ||
|
|
4f38267342 | ||
|
|
9790b89771 | ||
|
|
61f5319504 | ||
|
|
947714fbd2 | ||
|
|
c615c3e23d | ||
|
|
d0e1ad1a1a | ||
|
|
b68f88c53d | ||
|
|
7dc787a9c7 | ||
|
|
2dbb2a13ed | ||
|
|
ad49b24d0b | ||
|
|
ba128004ca | ||
|
|
5179733c63 | ||
|
|
40c3a57d5a | ||
|
|
de9e71dda7 | ||
|
|
1052f3ba40 | ||
|
|
6bad262ff1 | ||
|
|
092591620b | ||
|
|
4d6c90e902 | ||
|
|
17d9a73329 | ||
|
|
fe2de4f491 | ||
|
|
adfb256a0f | ||
|
|
029bdbcbd0 | ||
|
|
fd995e6166 | ||
|
|
ae7d2f3547 | ||
|
|
86fa0803e8 | ||
|
|
81dfe0a964 | ||
|
|
9bff989832 | ||
|
|
b8740e386e | ||
|
|
9f5e1b3a77 | ||
|
|
01f8c7afee | ||
|
|
67e5341846 | ||
|
|
123493895f | ||
|
|
129fb109d5 | ||
|
|
979c945674 | ||
|
|
c77a8d39f1 | ||
|
|
f4151372e5 | ||
|
|
0705787a07 | ||
|
|
a5129ec3eb | ||
|
|
47ac96155f | ||
|
|
8b2b56d9b8 | ||
|
|
544e63de42 | ||
|
|
43a025ebf9 | ||
|
|
6116d6a9bc | ||
|
|
e5aa6c9fc5 | ||
|
|
95ca6c1e1f | ||
|
|
7244ef44fb | ||
|
|
9df6f62a4c | ||
|
|
bf01a80b2b | ||
|
|
ccd3b3fedb | ||
|
|
4b4e25868c | ||
|
|
4d943752fe | ||
|
|
9128a40ada | ||
|
|
8910199181 | ||
|
|
1fc5a49958 | ||
|
|
98f1533731 | ||
|
|
aae843123f | ||
|
|
77804bf256 | ||
|
|
7aaa21d70a | ||
|
|
ee9b8cde5a | ||
|
|
04ea079130 | ||
|
|
2dd03d6741 | ||
|
|
1680a5f0a0 | ||
|
|
5dd1a5f3c9 | ||
|
|
15792b227a | ||
|
|
38d3cddb0c | ||
|
|
a99d5f0798 | ||
|
|
53c3e6434d | ||
|
|
bf40000e72 | ||
|
|
fb99d85b76 | ||
|
|
85476bf093 | ||
|
|
819c227bf2 | ||
|
|
4b23819189 | ||
|
|
b893112a90 | ||
|
|
9fa477e303 | ||
|
|
b7e3320fe4 | ||
|
|
58025ee1be | ||
|
|
7a3bc6efd4 | ||
|
|
de0fb93f3d | ||
|
|
4419612150 | ||
|
|
5ececfad2c | ||
|
|
4f376bbb5e | ||
|
|
d03123204d | ||
|
|
0df1c44b12 | ||
|
|
75fbb8483e | ||
|
|
52e2737460 | ||
|
|
c83cd3fba9 | ||
|
|
d41ac23a03 | ||
|
|
dbebeb7235 | ||
|
|
9e129fd653 | ||
|
|
0a44c7f162 | ||
|
|
b12735358a | ||
|
|
318beaa720 | ||
|
|
f7dc659e52 | ||
|
|
35afa1c1f4 | ||
|
|
c71b655cfc | ||
|
|
ec9db747d9 | ||
|
|
3e8ded8646 | ||
|
|
966f32b2ac | ||
|
|
cde99f8517 | ||
|
|
01db066498 | ||
|
|
9653e2ba9a | ||
|
|
4d87007327 | ||
|
|
dbea38b440 | ||
|
|
0bc120edda | ||
|
|
297bfa6b30 | ||
|
|
58c078fc88 | ||
|
|
79663fe1a0 | ||
|
|
9a4e0e8a28 | ||
|
|
515386e1a7 | ||
|
|
49bf6fc095 | ||
|
|
d63314d737 | ||
|
|
b186786563 | ||
|
|
3cc0218280 | ||
|
|
b794edef6a | ||
|
|
5cc3c510c5 | ||
|
|
5aec4fe722 | ||
|
|
1513b82eed | ||
|
|
7d5e57f7ff | ||
|
|
8ecefa978c | ||
|
|
a673ac7ae6 | ||
|
|
99e512e3f2 | ||
|
|
166d4db597 | ||
|
|
c04748f3fb | ||
|
|
63e314ea22 | ||
|
|
0d6bf94eb6 | ||
|
|
f88cddfb4d | ||
|
|
0814bc19bd | ||
|
|
422ad51afb | ||
|
|
a5a3a7be11 | ||
|
|
a4b830a9fc | ||
|
|
68ccdd77fe | ||
|
|
6124673bbc | ||
|
|
cac2dd4dd8 | ||
|
|
70b57afda6 | ||
|
|
f6c2a1e24e | ||
|
|
480befa88f | ||
|
|
a3fef4879e | ||
|
|
330cfc923c | ||
|
|
0fc0551edd | ||
|
|
296a752555 | ||
|
|
3fbfb56001 | ||
|
|
50a6a566ca | ||
|
|
aca2c96602 | ||
|
|
57a0f38db6 | ||
|
|
ff39bf0b80 | ||
|
|
4b38cff005 | ||
|
|
89949a1156 | ||
|
|
97ec4563b4 | ||
|
|
47665ad777 | ||
|
|
e1e58584a9 | ||
|
|
62fc48433c | ||
|
|
a72aaa2268 | ||
|
|
9cbccf88cf | ||
|
|
de0d8cbdcf | ||
|
|
333899deb6 | ||
|
|
1d0b48d7da | ||
|
|
a2765bb1be | ||
|
|
c19c8f9503 | ||
|
|
f17bfb937f | ||
|
|
4e4ceff6cf | ||
|
|
ef532adb2f | ||
|
|
9327431e97 | ||
|
|
c9c735e20d | ||
|
|
f28dcb3751 | ||
|
|
9e088bd41d | ||
|
|
52087ca1c5 | ||
|
|
0de0d23df4 | ||
|
|
498f0ae9d7 | ||
|
|
ee91e6b981 | ||
|
|
265d2da6cf | ||
|
|
8eb7d6d610 | ||
|
|
94f2bf025a | ||
|
|
262b43a866 | ||
|
|
5259a84b7a | ||
|
|
bf83bc208d | ||
|
|
e616dc116a | ||
|
|
933c18f2ad | ||
|
|
be5bcd549d | ||
|
|
c845d7a5fe | ||
|
|
16d9d97d8c | ||
|
|
8e36b22a71 | ||
|
|
98c4e0d970 | ||
|
|
83fe25ca4e | ||
|
|
b7e9966fb2 | ||
|
|
8a3ccb53c4 | ||
|
|
3e9a075b48 | ||
|
|
95d9d22949 | ||
|
|
728f641179 | ||
|
|
8ce683f4fe | ||
|
|
fd7762b7e2 | ||
|
|
9333e5369d | ||
|
|
e92020a4db | ||
|
|
b9b2a8a7ef | ||
|
|
b37a981500 | ||
|
|
a500e22104 | ||
|
|
ac2aad6343 | ||
|
|
c8a887f512 | ||
|
|
f4a5511e83 | ||
|
|
a6f7d725a0 | ||
|
|
dd98523b8d | ||
|
|
98ca81073e | ||
|
|
5f80a1033b | ||
|
|
70518e7093 | ||
|
|
0c70609031 | ||
|
|
b1c6243f8a | ||
|
|
12a3abf6fa | ||
|
|
f46ed32a11 | ||
|
|
be2d536f52 | ||
|
|
72253654d5 | ||
|
|
a2bd274d11 | ||
|
|
416301306a | ||
|
|
a49a32f55f | ||
|
|
7af6ab2351 | ||
|
|
6d85cd3f7d | ||
|
|
d9a77b38fc | ||
|
|
e50fee5738 | ||
|
|
63ca9028bc | ||
|
|
62845372ad | ||
|
|
5458250462 | ||
|
|
8efeb7f4ac | ||
|
|
303a8df35d | ||
|
|
21b1a9349d | ||
|
|
8b99eae5e8 | ||
|
|
cd6b5870e2 | ||
|
|
02f5235773 |
28
.copr/Makefile
Normal file
@@ -0,0 +1,28 @@
|
||||
#!/usr/bin/make -f
|
||||
|
||||
spec := contrib/rpm/buildah_copr.spec
|
||||
outdir := $(CURDIR)
|
||||
tmpdir := build
|
||||
gitdir := $(PWD)/.git
|
||||
|
||||
rev := $(shell sed 's/\(.......\).*/\1/' $(gitdir)/$$(sed -n '/^ref:/{s/.* //;p}' $(gitdir)/HEAD))
|
||||
date := $(shell date +%Y%m%d.%H%M)
|
||||
|
||||
version := $(shell sed -n '/Version:/{s/.* //;p}' $(spec))
|
||||
release := $(date).git.$(rev)
|
||||
|
||||
srpm: $(outdir)/buildah-$(version)-$(release).src.rpm
|
||||
|
||||
$(tmpdir)/buildah.spec: $(spec)
|
||||
@mkdir -p $(tmpdir)
|
||||
sed '/^Release:/s/\(: *\).*/\1$(release)%{?dist}/' $< >$@
|
||||
|
||||
$(tmpdir)/$(version).tar.gz: $(gitdir)/..
|
||||
@mkdir -p $(tmpdir)
|
||||
tar c --exclude-vcs --exclude-vcs-ignores -C $< --transform 's|^\.|buildah-$(version)|' . | gzip -9 >$@
|
||||
|
||||
$(outdir)/buildah-$(version)-$(release).src.rpm: $(tmpdir)/buildah.spec $(tmpdir)/$(version).tar.gz
|
||||
@mkdir -p $(outdir)
|
||||
rpmbuild -D'_srcrpmdir $(outdir)' -D'_sourcedir $(tmpdir)' -bs $(tmpdir)/buildah.spec
|
||||
|
||||
.PHONY: srpm
|
||||
65
.github/ISSUE_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
<!--
|
||||
If you are reporting a new issue, make sure that we do not have any duplicates
|
||||
already open. You can ensure this by searching the issue list for this
|
||||
repository. If there is a duplicate, please close your issue and add a comment
|
||||
to the existing issue instead.
|
||||
|
||||
If you suspect your issue is a bug, please edit your issue description to
|
||||
include the BUG REPORT INFORMATION shown below. If you fail to provide this
|
||||
information within 7 days, we cannot debug your issue and will close it. We
|
||||
will, however, reopen it if you later provide the information.
|
||||
|
||||
---------------------------------------------------
|
||||
BUG REPORT INFORMATION
|
||||
---------------------------------------------------
|
||||
Use the commands below to provide key information from your environment:
|
||||
You do NOT have to include this information if this is a FEATURE REQUEST
|
||||
-->
|
||||
|
||||
**Description**
|
||||
|
||||
<!--
|
||||
Briefly describe the problem you are having in a few paragraphs.
|
||||
-->
|
||||
|
||||
**Steps to reproduce the issue:**
|
||||
1.
|
||||
2.
|
||||
3.
|
||||
|
||||
|
||||
**Describe the results you received:**
|
||||
|
||||
|
||||
**Describe the results you expected:**
|
||||
|
||||
|
||||
**Output of `rpm -q buildah` or `apt list buildah`:**
|
||||
|
||||
```
|
||||
(paste your output here)
|
||||
```
|
||||
|
||||
**Output of `buildah version`:**
|
||||
|
||||
```
|
||||
(paste your output here)
|
||||
```
|
||||
|
||||
**Output of `cat /etc/*release`:**
|
||||
|
||||
```
|
||||
(paste your output here)
|
||||
```
|
||||
|
||||
**Output of `uname -a`:**
|
||||
|
||||
```
|
||||
(paste your output here)
|
||||
```
|
||||
|
||||
**Output of `cat /etc/containers/storage.conf`:**
|
||||
|
||||
```
|
||||
(paste your output here)
|
||||
```
|
||||
4
.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
docs/buildah*.1
|
||||
/buildah
|
||||
/imgtype
|
||||
/build/
|
||||
@@ -14,15 +14,21 @@ dnf install -y \
|
||||
device-mapper-devel \
|
||||
findutils \
|
||||
git \
|
||||
glib2-devel \
|
||||
gnupg \
|
||||
golang \
|
||||
gpgme-devel \
|
||||
libassuan-devel \
|
||||
libseccomp-devel \
|
||||
libselinux-devel \
|
||||
libselinux-utils \
|
||||
make \
|
||||
openssl \
|
||||
ostree-devel \
|
||||
which
|
||||
|
||||
# Red Hat CI adds a merge commit, for testing, which fails the
|
||||
# PAPR adds a merge commit, for testing, which fails the
|
||||
# short-commit-subject validation test, so tell git-validate.sh to only check
|
||||
# up to, but not including, the merge commit.
|
||||
export GITVALIDATE_TIP=$(cd $GOSRC; git log -2 --pretty='%H' | tail -n 1)
|
||||
make -C $GOSRC install.tools all validate
|
||||
$GOSRC/tests/test_runner.sh
|
||||
make -C $GOSRC install.tools runc all validate test-unit test-integration TAGS="seccomp"
|
||||
49
.papr.yml
Normal file
@@ -0,0 +1,49 @@
|
||||
branches:
|
||||
- master
|
||||
- auto
|
||||
- try
|
||||
|
||||
host:
|
||||
distro: fedora/26/atomic
|
||||
|
||||
required: true
|
||||
|
||||
tests:
|
||||
# Let's create a self signed certificate and get it in the right places
|
||||
- hostname
|
||||
- ip a
|
||||
- ping -c 3 localhost
|
||||
- cat /etc/hostname
|
||||
- mkdir -p /home/travis/auth
|
||||
- openssl req -newkey rsa:4096 -nodes -sha256 -keyout /home/travis/auth/domain.key -x509 -days 2 -out /home/travis/auth/domain.crt -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
|
||||
- cp /home/travis/auth/domain.crt /home/travis/auth/domain.cert
|
||||
- sudo mkdir -p /etc/docker/certs.d/docker.io/
|
||||
- sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/docker.io/ca.crt
|
||||
- sudo mkdir -p /etc/docker/certs.d/localhost:5000/
|
||||
- sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/localhost:5000/ca.crt
|
||||
- sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/localhost:5000/domain.crt
|
||||
# Create the credentials file, then start up the Docker registry
|
||||
- docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > /home/travis/auth/htpasswd
|
||||
- docker run -d -p 5000:5000 --name registry -v /home/travis/auth:/home/travis/auth:Z -e "REGISTRY_AUTH=htpasswd" -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" -e REGISTRY_AUTH_HTPASSWD_PATH=/home/travis/auth/htpasswd -e REGISTRY_HTTP_TLS_CERTIFICATE=/home/travis/auth/domain.crt -e REGISTRY_HTTP_TLS_KEY=/home/travis/auth/domain.key registry:2
|
||||
|
||||
# Test Docker setup
|
||||
- docker ps --all
|
||||
- docker images
|
||||
- ls -alF /home/travis/auth
|
||||
- docker pull alpine
|
||||
- docker login localhost:5000 --username testuser --password testpassword
|
||||
- docker tag alpine localhost:5000/my-alpine
|
||||
- docker push localhost:5000/my-alpine
|
||||
- docker ps --all
|
||||
- docker images
|
||||
- docker rmi docker.io/alpine
|
||||
- docker rmi localhost:5000/my-alpine
|
||||
- docker pull localhost:5000/my-alpine
|
||||
- docker ps --all
|
||||
- docker images
|
||||
- docker rmi localhost:5000/my-alpine
|
||||
|
||||
# mount yum repos to inherit injected mirrors from PAPR
|
||||
- docker run --net=host --privileged -v /etc/yum.repos.d:/etc/yum.repos.d.host:ro
|
||||
-v $PWD:/code registry.fedoraproject.org/fedora:26 sh -c
|
||||
"cp -fv /etc/yum.repos.d{.host/*.repo,} && /code/.papr.sh"
|
||||
@@ -1,12 +0,0 @@
|
||||
branches:
|
||||
- master
|
||||
- auto
|
||||
- try
|
||||
|
||||
host:
|
||||
distro: fedora/25/atomic
|
||||
|
||||
required: true
|
||||
|
||||
tests:
|
||||
- docker run --privileged -v $PWD:/code fedora:25 /code/.redhat-ci.sh
|
||||
63
.travis.yml
@@ -1,14 +1,65 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.7
|
||||
- 1.8
|
||||
- tip
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go:
|
||||
- 1.8
|
||||
- 1.9.x
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
# If the latest unstable development version of go fails, that's OK.
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
# Don't hold on the tip tests to finish. Mark tests green if the
|
||||
# stable versions pass.
|
||||
fast_finish: true
|
||||
|
||||
services:
|
||||
- docker
|
||||
before_install:
|
||||
- sudo add-apt-repository -y ppa:duggan/bats
|
||||
- sudo apt-get update
|
||||
- sudo apt-get -qq install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libselinux1-dev
|
||||
- sudo apt-get -qq remove libseccomp2
|
||||
- sudo apt-get -qq update
|
||||
- sudo apt-get -qq install bats btrfs-tools git libdevmapper-dev libgpgme11-dev
|
||||
- sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
|
||||
- mkdir /home/travis/auth
|
||||
install:
|
||||
# Let's create a self signed certificate and get it in the right places
|
||||
- hostname
|
||||
- ip a
|
||||
- ping -c 3 localhost
|
||||
- cat /etc/hostname
|
||||
- openssl req -newkey rsa:4096 -nodes -sha256 -keyout /home/travis/auth/domain.key -x509 -days 2 -out /home/travis/auth/domain.crt -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
|
||||
- cp /home/travis/auth/domain.crt /home/travis/auth/domain.cert
|
||||
- sudo mkdir -p /etc/docker/certs.d/docker.io/
|
||||
- sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/docker.io/ca.crt
|
||||
- sudo mkdir -p /etc/docker/certs.d/localhost:5000/
|
||||
- sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/localhost:5000/ca.crt
|
||||
- sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/localhost:5000/domain.crt
|
||||
# Create the credentials file, then start up the Docker registry
|
||||
- docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > /home/travis/auth/htpasswd
|
||||
- docker run -d -p 5000:5000 --name registry -v /home/travis/auth:/home/travis/auth:Z -e "REGISTRY_AUTH=htpasswd" -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" -e REGISTRY_AUTH_HTPASSWD_PATH=/home/travis/auth/htpasswd -e REGISTRY_HTTP_TLS_CERTIFICATE=/home/travis/auth/domain.crt -e REGISTRY_HTTP_TLS_KEY=/home/travis/auth/domain.key registry:2
|
||||
script:
|
||||
- make install.tools all validate
|
||||
# Let's do some docker stuff just for verification purposes
|
||||
- docker ps --all
|
||||
- docker images
|
||||
- ls -alF /home/travis/auth
|
||||
- docker pull alpine
|
||||
- docker login localhost:5000 --username testuser --password testpassword
|
||||
- docker tag alpine localhost:5000/my-alpine
|
||||
- docker push localhost:5000/my-alpine
|
||||
- docker ps --all
|
||||
- docker images
|
||||
- docker rmi docker.io/alpine
|
||||
- docker rmi localhost:5000/my-alpine
|
||||
- docker pull localhost:5000/my-alpine
|
||||
- docker ps --all
|
||||
- docker images
|
||||
- docker rmi localhost:5000/my-alpine
|
||||
# Setting up Docker Registry is complete, let's do Buildah testing!
|
||||
- make install.tools install.libseccomp.sudo all runc validate TAGS="apparmor seccomp containers_image_ostree_stub"
|
||||
- go test -c -tags "apparmor seccomp `./btrfs_tag.sh` `./libdm_tag.sh` `./ostree_tag.sh` `./selinux_tag.sh`" ./cmd/buildah
|
||||
- tmp=`mktemp -d`; mkdir $tmp/root $tmp/runroot; sudo PATH="$PATH" ./buildah.test -test.v -root $tmp/root -runroot $tmp/runroot -storage-driver vfs -signature-policy `pwd`/tests/policy.json
|
||||
- cd tests; sudo PATH="$PATH" ./test_runner.sh
|
||||
|
||||
80
CHANGELOG.md
Normal file
@@ -0,0 +1,80 @@
|
||||
# Changelog
|
||||
|
||||
## 0.5 - 2017-11-07
|
||||
Add secrets patch to buildah
|
||||
Add proper SELinux labeling to buildah run
|
||||
Add tls-verify to bud command
|
||||
Make filtering by date use the image's date
|
||||
images: don't list unnamed images twice
|
||||
Fix timeout issue
|
||||
Add further tty verbiage to buildah run
|
||||
Make inspect try an image on failure if type not specified
|
||||
Add support for `buildah run --hostname`
|
||||
Tons of bug fixes and code cleanup
|
||||
|
||||
## 0.4 - 2017-09-22
|
||||
### Added
|
||||
Update buildah spec file to match new version
|
||||
Bump to version 0.4
|
||||
Add default transport to push if not provided
|
||||
Add authentication to commit and push
|
||||
Remove --transport flag
|
||||
Run: don't complain about missing volume locations
|
||||
Add credentials to buildah from
|
||||
Remove export command
|
||||
Bump containers/storage and containers/image
|
||||
|
||||
## 0.3 - 2017-07-20
|
||||
## 0.2 - 2017-07-18
|
||||
### Added
|
||||
Vendor in latest containers/image and containers/storage
|
||||
Update image-spec and runtime-spec to v1.0.0
|
||||
Add support for -- ending options parsing to buildah run
|
||||
Add/Copy need to support glob syntax
|
||||
Add flag to remove containers on commit
|
||||
Add buildah export support
|
||||
update 'buildah images' and 'buildah rmi' commands
|
||||
buildah containers/image: Add JSON output option
|
||||
Add 'buildah version' command
|
||||
Handle "run" without an explicit command correctly
|
||||
Ensure volume points get created, and with perms
|
||||
Add a -a/--all option to "buildah containers"
|
||||
|
||||
## 0.1 - 2017-06-14
|
||||
### Added
|
||||
Vendor in latest container/storage container/image
|
||||
Add a "push" command
|
||||
Add an option to specify a Create date for images
|
||||
Allow building a source image from another image
|
||||
Improve buildah commit performance
|
||||
Add a --volume flag to "buildah run"
|
||||
Fix inspect/tag-by-truncated-image-ID
|
||||
Include image-spec and runtime-spec versions
|
||||
buildah mount command should list mounts when no arguments are given.
|
||||
Make the output image format selectable
|
||||
commit images in multiple formats
|
||||
Also import configurations from V2S1 images
|
||||
Add a "tag" command
|
||||
Add an "inspect" command
|
||||
Update reference comments for docker types origins
|
||||
Improve configuration preservation in imagebuildah
|
||||
Report pull/commit progress by default
|
||||
Contribute buildah.spec
|
||||
Remove --mount from buildah-from
|
||||
Add a build-using-dockerfile command (alias: bud)
|
||||
Create manpages for the buildah project
|
||||
Add installation for buildah and bash completions
|
||||
Rename "list"/"delete" to "containers"/"rm"
|
||||
Switch `buildah list quiet` option to only list container id's
|
||||
buildah delete should be able to delete multiple containers
|
||||
Correctly set tags on the names of pulled images
|
||||
Don't mix "config" in with "run" and "commit"
|
||||
Add a "list" command, for listing active builders
|
||||
Add "add" and "copy" commands
|
||||
Add a "run" command, using runc
|
||||
Massive refactoring
|
||||
Make a note to distinguish compression of layers
|
||||
|
||||
## 0.0 - 2017-01-26
|
||||
### Added
|
||||
Initial version, needs work
|
||||
142
CONTRIBUTING.md
Normal file
@@ -0,0 +1,142 @@
|
||||
# Contributing to Buildah
|
||||
|
||||
We'd love to have you join the community! Below summarizes the processes
|
||||
that we follow.
|
||||
|
||||
## Topics
|
||||
|
||||
* [Reporting Issues](#reporting-issues)
|
||||
* [Submitting Pull Requests](#submitting-pull-requests)
|
||||
* [Communications](#communications)
|
||||
* [Becoming a Maintainer](#becoming-a-maintainer)
|
||||
|
||||
## Reporting Issues
|
||||
|
||||
Before reporting an issue, check our backlog of
|
||||
[open issues](https://github.com/projectatomic/buildah/issues)
|
||||
to see if someone else has already reported it. If so, feel free to add
|
||||
your scenario, or additional information, to the discussion. Or simply
|
||||
"subscribe" to it to be notified when it is updated.
|
||||
|
||||
If you find a new issue with the project we'd love to hear about it! The most
|
||||
important aspect of a bug report is that it includes enough information for
|
||||
us to reproduce it. So, please include as much detail as possible and try
|
||||
to remove the extra stuff that doesn't really relate to the issue itself.
|
||||
The easier it is for us to reproduce it, the faster it'll be fixed!
|
||||
|
||||
Please don't include any private/sensitive information in your issue!
|
||||
|
||||
## Submitting Pull Requests
|
||||
|
||||
No Pull Request (PR) is too small! Typos, additional comments in the code,
|
||||
new testcases, bug fixes, new features, more documentation, ... it's all
|
||||
welcome!
|
||||
|
||||
While bug fixes can first be identified via an "issue", that is not required.
|
||||
It's ok to just open up a PR with the fix, but make sure you include the same
|
||||
information you would have included in an issue - like how to reproduce it.
|
||||
|
||||
PRs for new features should include some background on what use cases the
|
||||
new code is trying to address. When possible and when it makes sense, try to break-up
|
||||
larger PRs into smaller ones - it's easier to review smaller
|
||||
code changes. But only if those smaller ones make sense as stand-alone PRs.
|
||||
|
||||
Regardless of the type of PR, all PRs should include:
|
||||
* well documented code changes
|
||||
* additional testcases. Ideally, they should fail w/o your code change applied
|
||||
* documentation changes
|
||||
|
||||
Squash your commits into logical pieces of work that might want to be reviewed
|
||||
separate from the rest of the PRs. But, squashing down to just one commit is ok
|
||||
too since in the end the entire PR will be reviewed anyway. When in doubt,
|
||||
squash.
|
||||
|
||||
PRs that fix issues should include a reference like `Closes #XXXX` in the
|
||||
commit message so that github will automatically close the referenced issue
|
||||
when the PR is merged.
|
||||
|
||||
<!--
|
||||
All PRs require at least two LGTMs (Looks Good To Me) from maintainers.
|
||||
-->
|
||||
|
||||
### Sign your PRs
|
||||
|
||||
The sign-off is a line at the end of the explanation for the patch. Your
|
||||
signature certifies that you wrote the patch or otherwise have the right to pass
|
||||
it on as an open-source patch. The rules are simple: if you can certify
|
||||
the below (from [developercertificate.org](http://developercertificate.org/)):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
Then you just add a line to every git commit message:
|
||||
|
||||
Signed-off-by: Joe Smith <joe.smith@email.com>
|
||||
|
||||
Use your real name (sorry, no pseudonyms or anonymous contributions.)
|
||||
|
||||
If you set your `user.name` and `user.email` git configs, you can sign your
|
||||
commit automatically with `git commit -s`.
|
||||
|
||||
## Communications
|
||||
|
||||
For general questions, or discussions, please use the
|
||||
IRC group on `irc.freenode.net` called `cri-o`
|
||||
that has been setup.
|
||||
|
||||
For discussions around issues/bugs and features, you can use the github
|
||||
[issues](https://github.com/projectatomic/buildah/issues)
|
||||
and
|
||||
[PRs](https://github.com/projectatomic/buildah/pulls)
|
||||
tracking system.
|
||||
|
||||
<!--
|
||||
## Becoming a Maintainer
|
||||
|
||||
To become a maintainer you must first be nominated by an existing maintainer.
|
||||
If a majority (>50%) of maintainers agree then the proposal is adopted and
|
||||
you will be added to the list.
|
||||
|
||||
Removing a maintainer requires at least 75% of the remaining maintainers
|
||||
approval, or if the person requests to be removed then it is automatic.
|
||||
Normally, a maintainer will only be removed if they are considered to be
|
||||
inactive for a long period of time or are viewed as disruptive to the community.
|
||||
|
||||
The current list of maintainers can be found in the
|
||||
[MAINTAINERS](MAINTAINERS) file.
|
||||
-->
|
||||
54
Makefile
@@ -1,17 +1,30 @@
|
||||
AUTOTAGS := $(shell ./btrfs_tag.sh) $(shell ./libdm_tag.sh)
|
||||
AUTOTAGS := $(shell ./btrfs_tag.sh) $(shell ./libdm_tag.sh) $(shell ./ostree_tag.sh) $(shell ./selinux_tag.sh)
|
||||
TAGS := seccomp
|
||||
PREFIX := /usr/local
|
||||
BINDIR := $(PREFIX)/bin
|
||||
BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
|
||||
BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)"
|
||||
GO := go
|
||||
|
||||
all: buildah docs
|
||||
GIT_COMMIT := $(shell git rev-parse --short HEAD)
|
||||
BUILD_INFO := $(shell date +%s)
|
||||
|
||||
RUNC_COMMIT := c5ec25487693612aed95673800863e134785f946
|
||||
LIBSECCOMP_COMMIT := release-2.3
|
||||
|
||||
LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} -X main.buildInfo=${BUILD_INFO}'
|
||||
|
||||
all: buildah imgtype docs
|
||||
|
||||
buildah: *.go imagebuildah/*.go cmd/buildah/*.go docker/*.go util/*.go
|
||||
go build -o buildah $(BUILDFLAGS) ./cmd/buildah
|
||||
$(GO) build $(LDFLAGS) -o buildah $(BUILDFLAGS) ./cmd/buildah
|
||||
|
||||
imgtype: *.go docker/*.go util/*.go tests/imgtype.go
|
||||
$(GO) build $(LDFLAGS) -o imgtype $(BUILDFLAGS) ./tests/imgtype.go
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
$(RM) buildah
|
||||
$(RM) buildah imgtype build
|
||||
$(MAKE) -C docs clean
|
||||
|
||||
.PHONY: docs
|
||||
@@ -38,11 +51,24 @@ validate:
|
||||
|
||||
.PHONY: install.tools
|
||||
install.tools:
|
||||
go get -u $(BUILDFLAGS) github.com/cpuguy83/go-md2man
|
||||
go get -u $(BUILDFLAGS) github.com/vbatts/git-validation
|
||||
go get -u $(BUILDFLAGS) gopkg.in/alecthomas/gometalinter.v1
|
||||
$(GO) get -u $(BUILDFLAGS) github.com/cpuguy83/go-md2man
|
||||
$(GO) get -u $(BUILDFLAGS) github.com/vbatts/git-validation
|
||||
$(GO) get -u $(BUILDFLAGS) gopkg.in/alecthomas/gometalinter.v1
|
||||
gometalinter.v1 -i
|
||||
|
||||
.PHONY: runc
|
||||
runc: gopath
|
||||
rm -rf ../../opencontainers/runc
|
||||
git clone https://github.com/opencontainers/runc ../../opencontainers/runc
|
||||
cd ../../opencontainers/runc && git checkout $(RUNC_COMMIT) && $(GO) build -tags "$(AUTOTAGS) $(TAGS)"
|
||||
ln -sf ../../opencontainers/runc/runc
|
||||
|
||||
.PHONY: install.libseccomp.sudo
|
||||
install.libseccomp.sudo: gopath
|
||||
rm -rf ../../seccomp/libseccomp
|
||||
git clone https://github.com/seccomp/libseccomp ../../seccomp/libseccomp
|
||||
cd ../../seccomp/libseccomp && git checkout $(LIBSECCOMP_COMMIT) && ./autogen.sh && ./configure --prefix=/usr && make all && sudo make install
|
||||
|
||||
.PHONY: install
|
||||
install:
|
||||
install -D -m0755 buildah $(DESTDIR)/$(BINDIR)/buildah
|
||||
@@ -51,3 +77,17 @@ install:
|
||||
.PHONY: install.completions
|
||||
install.completions:
|
||||
install -m 644 -D contrib/completions/bash/buildah $(DESTDIR)/${BASHINSTALLDIR}/buildah
|
||||
|
||||
.PHONY: install.runc
|
||||
install.runc:
|
||||
install -m 755 ../../opencontainers/runc/runc $(DESTDIR)/$(BINDIR)/
|
||||
|
||||
.PHONY: test-integration
|
||||
test-integration:
|
||||
cd tests; ./test_runner.sh
|
||||
|
||||
.PHONY: test-unit
|
||||
test-unit:
|
||||
tmp=$(shell mktemp -d) ; \
|
||||
mkdir -p $$tmp/root $$tmp/runroot; \
|
||||
$(GO) test -v -tags "$(AUTOTAGS) $(TAGS)" ./cmd/buildah -args -root $$tmp/root -runroot $$tmp/runroot -storage-driver vfs -signature-policy $(shell pwd)/tests/policy.json
|
||||
|
||||
117
README.md
@@ -1,4 +1,6 @@
|
||||
buildah - a tool which facilitates building OCI container images
|
||||

|
||||
|
||||
# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool which facilitates building OCI container images
|
||||
================================================================
|
||||
|
||||
[](https://goreportcard.com/report/github.com/projectatomic/buildah)
|
||||
@@ -6,7 +8,7 @@ buildah - a tool which facilitates building OCI container images
|
||||
|
||||
Note: this package is in alpha, but is close to being feature-complete.
|
||||
|
||||
The buildah package provides a command line tool which can be used to
|
||||
The Buildah package provides a command line tool which can be used to
|
||||
* create a working container, either from scratch or using an image as a starting point
|
||||
* create an image, either from a working container or via the instructions in a Dockerfile
|
||||
* images can be built in either the OCI image format or the traditional upstream docker image format
|
||||
@@ -15,74 +17,61 @@ The buildah package provides a command line tool which can be used to
|
||||
* use the updated contents of a container's root filesystem as a filesystem layer to create a new image
|
||||
* delete a working container or an image
|
||||
|
||||
**Installation notes**
|
||||
**[Changelog](CHANGELOG.md)**
|
||||
|
||||
Prior to installing buildah, install the following packages on your linux distro:
|
||||
* make
|
||||
* golang (Requires version 1.8.1 or higher.)
|
||||
* bats
|
||||
* btrfs-progs-devel
|
||||
* device-mapper-devel
|
||||
* gpgme-devel
|
||||
* libassuan-devel
|
||||
* git
|
||||
* bzip2
|
||||
* go-md2man
|
||||
* skopeo-containers
|
||||
**[Installation notes](install.md)**
|
||||
|
||||
In Fedora, you can use this command:
|
||||
**[Tutorials](docs/tutorials/tutorials.md)**
|
||||
|
||||
## Example
|
||||
|
||||
From [`./examples/lighttpd.sh`](examples/lighttpd.sh):
|
||||
|
||||
```bash
|
||||
$ cat > lighttpd.sh <<EOF
|
||||
#!/bin/bash -x
|
||||
|
||||
ctr1=`buildah from ${1:-fedora}`
|
||||
|
||||
## Get all updates and install our minimal httpd server
|
||||
buildah run $ctr1 -- dnf update -y
|
||||
buildah run $ctr1 -- dnf install -y lighttpd
|
||||
|
||||
## Include some buildtime annotations
|
||||
buildah config --annotation "com.example.build.host=$(uname -n)" $ctr1
|
||||
|
||||
## Run our server and expose the port
|
||||
buildah config $ctr1 --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf"
|
||||
buildah config $ctr1 --port 80
|
||||
|
||||
## Commit this container to an image name
|
||||
buildah commit $ctr1 ${2:-$USER/lighttpd}
|
||||
EOF
|
||||
|
||||
$ chmod +x lighttpd.sh
|
||||
$ sudo ./lighttpd.sh
|
||||
```
|
||||
dnf -y install \
|
||||
make \
|
||||
golang \
|
||||
bats \
|
||||
btrfs-progs-devel \
|
||||
device-mapper-devel \
|
||||
gpgme-devel \
|
||||
libassuan-devel \
|
||||
git \
|
||||
bzip2 \
|
||||
go-md2man \
|
||||
skopeo-containers
|
||||
```
|
||||
|
||||
Then to install buildah follow the steps in this example:
|
||||
|
||||
```
|
||||
mkdir ~/buildah
|
||||
cd ~/buildah
|
||||
export GOPATH=`pwd`
|
||||
git clone https://github.com/projectatomic/buildah ./src/github.com/projectatomic/buildah
|
||||
cd ./src/github.com/projectatomic/buildah
|
||||
make
|
||||
make install
|
||||
buildah --help
|
||||
```
|
||||
|
||||
buildah uses `runc` to run commands when `buildah run` is used, or when `buildah build-using-dockerfile`
|
||||
encounters a `RUN` instruction, so you'll also need to build and install a compatible version of
|
||||
[runc](https://github.com/opencontainers/runc) for buildah to call for those cases.
|
||||
|
||||
## Commands
|
||||
| Command | Description |
|
||||
| --------------------- | --------------------------------------------------- |
|
||||
| buildah-add(1) | Add the contents of a file, URL, or a directory to the container. |
|
||||
| buildah-bud(1) | Build an image using instructions from Dockerfiles. |
|
||||
| buildah-commit(1) | Create an image from a working container. |
|
||||
| buildah-config(1) | Update image configuration settings. |
|
||||
| buildah-containers(1) | List the working containers and their base images. |
|
||||
| buildah-copy(1) | Copies the contents of a file, URL, or directory into a container's working directory. |
|
||||
| buildah-from(1) | Creates a new working container, either from scratch or using a specified image as a starting point. |
|
||||
| buildah-images(1) | List images in local storage. |
|
||||
| buildah-inspect(1) | Inspects the configuration of a container or image. |
|
||||
| buildah-mount(1) | Mount the working container's root filesystem. |
|
||||
| buildah-push(1) | Copies an image from local storage. |
|
||||
| buildah-rm(1) | Removes one or more working containers. |
|
||||
| buildah-rmi(1) | Removes one or more images. |
|
||||
| buildah-run(1) | Run a command inside of the container. |
|
||||
| buildah-tag(1) | Add an additional name to a local image. |
|
||||
| buildah-umount(1) | Unmount a working container's root file system. |
|
||||
| Command | Description |
|
||||
| ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
|
||||
| [buildah-add(1)](/docs/buildah-add.md) | Add the contents of a file, URL, or a directory to the container. |
|
||||
| [buildah-bud(1)](/docs/buildah-bud.md) | Build an image using instructions from Dockerfiles. |
|
||||
| [buildah-commit(1)](/docs/buildah-commit.md) | Create an image from a working container. |
|
||||
| [buildah-config(1)](/docs/buildah-config.md) | Update image configuration settings. |
|
||||
| [buildah-containers(1)](/docs/buildah-containers.md) | List the working containers and their base images. |
|
||||
| [buildah-copy(1)](/docs/buildah-copy.md) | Copies the contents of a file, URL, or directory into a container's working directory. |
|
||||
| [buildah-from(1)](/docs/buildah-from.md) | Creates a new working container, either from scratch or using a specified image as a starting point. |
|
||||
| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. |
|
||||
| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. |
|
||||
| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. |
|
||||
| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. |
|
||||
| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. |
|
||||
| [buildah-rmi(1)](/docs/buildah-rmi.md) | Removes one or more images. |
|
||||
| [buildah-run(1)](/docs/buildah-run.md) | Run a command inside of the container. |
|
||||
| [buildah-tag(1)](/docs/buildah-tag.md) | Add an additional name to a local image. |
|
||||
| [buildah-umount(1)](/docs/buildah-umount.md) | Unmount a working container's root file system. |
|
||||
| [buildah-version(1)](/docs/buildah-version.md) | Display the Buildah Version Information |
|
||||
|
||||
**Future goals include:**
|
||||
* more CI tests
|
||||
|
||||
171
add.go
@@ -8,14 +8,21 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/chrootarchive"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/libpod/pkg/chrootuser"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
//AddAndCopyOptions holds options for add and copy commands.
|
||||
type AddAndCopyOptions struct {
|
||||
Chown string
|
||||
}
|
||||
|
||||
// addURL copies the contents of the source URL to the destination. This is
|
||||
// its own function so that deferred closes happen after we're done pulling
|
||||
// down each item of potentially many.
|
||||
@@ -58,8 +65,8 @@ func addURL(destination, srcurl string) error {
|
||||
// Add copies the contents of the specified sources into the container's root
|
||||
// filesystem, optionally extracting contents of local files that look like
|
||||
// non-empty archives.
|
||||
func (b *Builder) Add(destination string, extract bool, source ...string) error {
|
||||
mountPoint, err := b.Mount("")
|
||||
func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error {
|
||||
mountPoint, err := b.Mount(b.MountLabel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -68,12 +75,17 @@ func (b *Builder) Add(destination string, extract bool, source ...string) error
|
||||
logrus.Errorf("error unmounting container: %v", err2)
|
||||
}
|
||||
}()
|
||||
// Find out which user (and group) the destination should belong to.
|
||||
user, err := b.user(mountPoint, options.Chown)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dest := mountPoint
|
||||
if destination != "" && filepath.IsAbs(destination) {
|
||||
dest = filepath.Join(dest, destination)
|
||||
} else {
|
||||
if err = os.MkdirAll(filepath.Join(dest, b.WorkDir()), 0755); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring directory %q exists)", filepath.Join(dest, b.WorkDir()))
|
||||
if err = ensureDir(filepath.Join(dest, b.WorkDir()), user, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
dest = filepath.Join(dest, b.WorkDir(), destination)
|
||||
}
|
||||
@@ -81,8 +93,8 @@ func (b *Builder) Add(destination string, extract bool, source ...string) error
|
||||
// with a '/', create it so that we can be sure that it's a directory,
|
||||
// and any files we're copying will be placed in the directory.
|
||||
if len(destination) > 0 && destination[len(destination)-1] == os.PathSeparator {
|
||||
if err = os.MkdirAll(dest, 0755); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring directory %q exists", dest)
|
||||
if err = ensureDir(dest, user, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Make sure the destination's parent directory is usable.
|
||||
@@ -118,46 +130,123 @@ func (b *Builder) Add(destination string, extract bool, source ...string) error
|
||||
if err := addURL(d, src); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := setOwner("", d, user); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
srcfi, err := os.Stat(src)
|
||||
|
||||
glob, err := filepath.Glob(src)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading %q", src)
|
||||
return errors.Wrapf(err, "invalid glob %q", src)
|
||||
}
|
||||
if srcfi.IsDir() {
|
||||
// The source is a directory, so copy the contents of
|
||||
// the source directory into the target directory. Try
|
||||
// to create it first, so that if there's a problem,
|
||||
// we'll discover why that won't work.
|
||||
d := dest
|
||||
if err := os.MkdirAll(d, 0755); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring directory %q exists", d)
|
||||
}
|
||||
logrus.Debugf("copying %q to %q", src+string(os.PathSeparator)+"*", d+string(os.PathSeparator)+"*")
|
||||
if err := chrootarchive.CopyWithTar(src, d); err != nil {
|
||||
return errors.Wrapf(err, "error copying %q to %q", src, d)
|
||||
}
|
||||
continue
|
||||
if len(glob) == 0 {
|
||||
return errors.Wrapf(syscall.ENOENT, "no files found matching %q", src)
|
||||
}
|
||||
if !extract || !archive.IsArchivePath(src) {
|
||||
// This source is a file, and either it's not an
|
||||
// archive, or we don't care whether or not it's an
|
||||
// archive.
|
||||
d := dest
|
||||
if destfi != nil && destfi.IsDir() {
|
||||
d = filepath.Join(dest, filepath.Base(src))
|
||||
for _, gsrc := range glob {
|
||||
srcfi, err := os.Stat(gsrc)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading %q", gsrc)
|
||||
}
|
||||
// Copy the file, preserving attributes.
|
||||
logrus.Debugf("copying %q to %q", src, d)
|
||||
if err := chrootarchive.CopyFileWithTar(src, d); err != nil {
|
||||
return errors.Wrapf(err, "error copying %q to %q", src, d)
|
||||
if srcfi.IsDir() {
|
||||
// The source is a directory, so copy the contents of
|
||||
// the source directory into the target directory. Try
|
||||
// to create it first, so that if there's a problem,
|
||||
// we'll discover why that won't work.
|
||||
if err = ensureDir(dest, user, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("copying %q to %q", gsrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
|
||||
if err := copyWithTar(gsrc, dest); err != nil {
|
||||
return errors.Wrapf(err, "error copying %q to %q", gsrc, dest)
|
||||
}
|
||||
if err := setOwner(gsrc, dest, user); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !extract || !archive.IsArchivePath(gsrc) {
|
||||
// This source is a file, and either it's not an
|
||||
// archive, or we don't care whether or not it's an
|
||||
// archive.
|
||||
d := dest
|
||||
if destfi != nil && destfi.IsDir() {
|
||||
d = filepath.Join(dest, filepath.Base(gsrc))
|
||||
}
|
||||
// Copy the file, preserving attributes.
|
||||
logrus.Debugf("copying %q to %q", gsrc, d)
|
||||
if err := copyFileWithTar(gsrc, d); err != nil {
|
||||
return errors.Wrapf(err, "error copying %q to %q", gsrc, d)
|
||||
}
|
||||
if err := setOwner(gsrc, d, user); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
// We're extracting an archive into the destination directory.
|
||||
logrus.Debugf("extracting contents of %q into %q", gsrc, dest)
|
||||
if err := untarPath(gsrc, dest); err != nil {
|
||||
return errors.Wrapf(err, "error extracting %q into %q", gsrc, dest)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// We're extracting an archive into the destination directory.
|
||||
logrus.Debugf("extracting contents of %q into %q", src, dest)
|
||||
if err := chrootarchive.UntarPath(src, dest); err != nil {
|
||||
return errors.Wrapf(err, "error extracting %q into %q", src, dest)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// user returns the user (and group) information which the destination should belong to.
|
||||
func (b *Builder) user(mountPoint string, userspec string) (specs.User, error) {
|
||||
if userspec == "" {
|
||||
userspec = b.User()
|
||||
}
|
||||
|
||||
uid, gid, err := chrootuser.GetUser(mountPoint, userspec)
|
||||
u := specs.User{
|
||||
UID: uid,
|
||||
GID: gid,
|
||||
Username: userspec,
|
||||
}
|
||||
return u, err
|
||||
}
|
||||
|
||||
// setOwner sets the uid and gid owners of a given path.
|
||||
func setOwner(src, dest string, user specs.User) error {
|
||||
fid, err := os.Stat(dest)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading %q", dest)
|
||||
}
|
||||
if !fid.IsDir() || src == "" {
|
||||
if err := os.Lchown(dest, int(user.UID), int(user.GID)); err != nil {
|
||||
return errors.Wrapf(err, "error setting ownership of %q", dest)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
err = filepath.Walk(src, func(p string, info os.FileInfo, we error) error {
|
||||
relPath, err2 := filepath.Rel(src, p)
|
||||
if err2 != nil {
|
||||
return errors.Wrapf(err2, "error getting relative path of %q to set ownership on destination", p)
|
||||
}
|
||||
if relPath != "." {
|
||||
absPath := filepath.Join(dest, relPath)
|
||||
if err2 := os.Lchown(absPath, int(user.UID), int(user.GID)); err != nil {
|
||||
return errors.Wrapf(err2, "error setting ownership of %q", absPath)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error walking dir %q to set ownership", src)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureDir creates a directory if it doesn't exist, setting ownership and permissions as passed by user and perm.
|
||||
func ensureDir(path string, user specs.User, perm os.FileMode) error {
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(path, perm); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring directory %q exists", path)
|
||||
}
|
||||
if err := os.Chown(path, int(user.UID), int(user.GID)); err != nil {
|
||||
return errors.Wrapf(err, "error setting ownership of %q", path)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
75
buildah.go
@@ -7,6 +7,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/ioutils"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@@ -19,9 +20,17 @@ const (
|
||||
// identify working containers.
|
||||
Package = "buildah"
|
||||
// Version for the Package
|
||||
Version = "0.1"
|
||||
Version = "0.12"
|
||||
// The value we use to identify what type of information, currently a
|
||||
// serialized Builder structure, we are using as per-container state.
|
||||
// This should only be changed when we make incompatible changes to
|
||||
// that data structure, as it's used to distinguish containers which
|
||||
// are "ours" from ones that aren't.
|
||||
containerType = Package + " 0.0.1"
|
||||
stateFile = Package + ".json"
|
||||
// The file in the per-container directory which we use to store our
|
||||
// per-container state. If it isn't there, then the container isn't
|
||||
// one of our build containers.
|
||||
stateFile = Package + ".json"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -68,6 +77,10 @@ type Builder struct {
|
||||
// MountPoint is the last location where the container's root
|
||||
// filesystem was mounted. It should not be modified.
|
||||
MountPoint string `json:"mountpoint,omitempty"`
|
||||
// ProcessLabel is the SELinux process label associated with the container
|
||||
ProcessLabel string `json:"process-label,omitempty"`
|
||||
// MountLabel is the SELinux mount label associated with the container
|
||||
MountLabel string `json:"mount-label,omitempty"`
|
||||
|
||||
// ImageAnnotations is a set of key-value pairs which is stored in the
|
||||
// image's manifest.
|
||||
@@ -78,6 +91,48 @@ type Builder struct {
|
||||
// Image metadata and runtime settings, in multiple formats.
|
||||
OCIv1 v1.Image `json:"ociv1,omitempty"`
|
||||
Docker docker.V2Image `json:"docker,omitempty"`
|
||||
// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
|
||||
DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"`
|
||||
}
|
||||
|
||||
// BuilderInfo are used as objects to display container information
|
||||
type BuilderInfo struct {
|
||||
Type string
|
||||
FromImage string
|
||||
FromImageID string
|
||||
Config string
|
||||
Manifest string
|
||||
Container string
|
||||
ContainerID string
|
||||
MountPoint string
|
||||
ProcessLabel string
|
||||
MountLabel string
|
||||
ImageAnnotations map[string]string
|
||||
ImageCreatedBy string
|
||||
OCIv1 v1.Image
|
||||
Docker docker.V2Image
|
||||
DefaultMountsFilePath string
|
||||
}
|
||||
|
||||
// GetBuildInfo gets a pointer to a Builder object and returns a BuilderInfo object from it.
|
||||
// This is used in the inspect command to display Manifest and Config as string and not []byte.
|
||||
func GetBuildInfo(b *Builder) BuilderInfo {
|
||||
return BuilderInfo{
|
||||
Type: b.Type,
|
||||
FromImage: b.FromImage,
|
||||
FromImageID: b.FromImageID,
|
||||
Config: string(b.Config),
|
||||
Manifest: string(b.Manifest),
|
||||
Container: b.Container,
|
||||
ContainerID: b.ContainerID,
|
||||
MountPoint: b.MountPoint,
|
||||
ProcessLabel: b.ProcessLabel,
|
||||
ImageAnnotations: b.ImageAnnotations,
|
||||
ImageCreatedBy: b.ImageCreatedBy,
|
||||
OCIv1: b.OCIv1,
|
||||
Docker: b.Docker,
|
||||
DefaultMountsFilePath: b.DefaultMountsFilePath,
|
||||
}
|
||||
}
|
||||
|
||||
// BuilderOptions are used to initialize a new Builder.
|
||||
@@ -95,8 +150,13 @@ type BuilderOptions struct {
|
||||
PullPolicy int
|
||||
// Registry is a value which is prepended to the image's name, if it
|
||||
// needs to be pulled and the image name alone can not be resolved to a
|
||||
// reference to a source image.
|
||||
// reference to a source image. No separator is implicitly added.
|
||||
Registry string
|
||||
// Transport is a value which is prepended to the image's name, if it
|
||||
// needs to be pulled and the image name alone, or the image name and
|
||||
// the registry together, can not be resolved to a reference to a
|
||||
// source image. No separator is implicitly added.
|
||||
Transport string
|
||||
// Mount signals to NewBuilder() that the container should be mounted
|
||||
// immediately.
|
||||
Mount bool
|
||||
@@ -109,6 +169,11 @@ type BuilderOptions struct {
|
||||
// ReportWriter is an io.Writer which will be used to log the reading
|
||||
// of the source image from a registry, if we end up pulling the image.
|
||||
ReportWriter io.Writer
|
||||
// github.com/containers/image/types SystemContext to hold credentials
|
||||
// and other authentication/authorization information.
|
||||
SystemContext *types.SystemContext
|
||||
// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
|
||||
DefaultMountsFilePath string
|
||||
}
|
||||
|
||||
// ImportOptions are used to initialize a Builder from an existing container
|
||||
@@ -134,6 +199,10 @@ type ImportFromImageOptions struct {
|
||||
// specified, indicating that the shared, system-wide default policy
|
||||
// should be used.
|
||||
SignaturePolicyPath string
|
||||
// github.com/containers/image/types SystemContext to hold information
|
||||
// about which registries we should check for completing image names
|
||||
// that don't include a domain portion.
|
||||
SystemContext *types.SystemContext
|
||||
}
|
||||
|
||||
// NewBuilder creates a new build container.
|
||||
|
||||
@@ -2,10 +2,17 @@ package main
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
addAndCopyFlags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "chown",
|
||||
Usage: "Set the user and group ownership of the destination content",
|
||||
},
|
||||
}
|
||||
addDescription = "Adds the contents of a file, URL, or directory to a container's working\n directory. If a local file appears to be an archive, its contents are\n extracted and added instead of the archive file itself."
|
||||
copyDescription = "Copies the contents of a file, URL, or directory into a container's working\n directory"
|
||||
|
||||
@@ -13,6 +20,7 @@ var (
|
||||
Name: "add",
|
||||
Usage: "Add content to the container",
|
||||
Description: addDescription,
|
||||
Flags: addAndCopyFlags,
|
||||
Action: addCmd,
|
||||
ArgsUsage: "CONTAINER-NAME-OR-ID [[FILE | DIRECTORY | URL] ...] [DESTINATION]",
|
||||
}
|
||||
@@ -21,6 +29,7 @@ var (
|
||||
Name: "copy",
|
||||
Usage: "Copy content into the container",
|
||||
Description: copyDescription,
|
||||
Flags: addAndCopyFlags,
|
||||
Action: copyCmd,
|
||||
ArgsUsage: "CONTAINER-NAME-OR-ID [[FILE | DIRECTORY | URL] ...] [DESTINATION]",
|
||||
}
|
||||
@@ -34,7 +43,11 @@ func addAndCopyCmd(c *cli.Context, extractLocalArchives bool) error {
|
||||
name := args[0]
|
||||
args = args.Tail()
|
||||
|
||||
// If list is greater then one, the last item is the destination
|
||||
if err := validateFlags(c, addAndCopyFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If list is greater than one, the last item is the destination
|
||||
dest := ""
|
||||
size := len(args)
|
||||
if size > 1 {
|
||||
@@ -52,8 +65,11 @@ func addAndCopyCmd(c *cli.Context, extractLocalArchives bool) error {
|
||||
return errors.Wrapf(err, "error reading build container %q", name)
|
||||
}
|
||||
|
||||
err = builder.Add(dest, extractLocalArchives, args...)
|
||||
if err != nil {
|
||||
options := buildah.AddAndCopyOptions{
|
||||
Chown: c.String("chown"),
|
||||
}
|
||||
|
||||
if err := builder.Add(dest, extractLocalArchives, options, args...); err != nil {
|
||||
return errors.Wrapf(err, "error adding content to container %q", builder.Container)
|
||||
}
|
||||
|
||||
|
||||
@@ -5,22 +5,39 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah/imagebuildah"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
budFlags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "quiet, q",
|
||||
Usage: "refrain from announcing build instructions and image read/write progress",
|
||||
cli.StringFlag{
|
||||
Name: "authfile",
|
||||
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "build-arg",
|
||||
Usage: "`argument=value` to supply to the builder",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "registry",
|
||||
Usage: "prefix to prepend to the image name in order to pull the image",
|
||||
Value: DefaultRegistry,
|
||||
Name: "cert-dir",
|
||||
Value: "",
|
||||
Usage: "use certificates at the specified path to access the registry",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "creds",
|
||||
Value: "",
|
||||
Usage: "use `[username[:password]]` for accessing the registry",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "file, f",
|
||||
Usage: "`pathname or URL` of a Dockerfile",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "`format` of the built image's manifest and metadata",
|
||||
},
|
||||
cli.BoolTFlag{
|
||||
Name: "pull",
|
||||
@@ -30,13 +47,9 @@ var (
|
||||
Name: "pull-always",
|
||||
Usage: "pull the image, even if a version is present",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "signature-policy",
|
||||
Usage: "`pathname` of signature policy file (not usually used)",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "build-arg",
|
||||
Usage: "`argument=value` to supply to the builder",
|
||||
cli.BoolFlag{
|
||||
Name: "quiet, q",
|
||||
Usage: "refrain from announcing build instructions and image read/write progress",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "runtime",
|
||||
@@ -48,18 +61,19 @@ var (
|
||||
Usage: "add global flags for the container runtime",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "`format` of the built image's manifest and metadata",
|
||||
Name: "signature-policy",
|
||||
Usage: "`pathname` of signature policy file (not usually used)",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "tag, t",
|
||||
Usage: "`tag` to apply to the built image",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "file, f",
|
||||
Usage: "`pathname or URL` of a Dockerfile",
|
||||
cli.BoolTFlag{
|
||||
Name: "tls-verify",
|
||||
Usage: "require HTTPS and verify certificates when accessing the registry",
|
||||
},
|
||||
}
|
||||
|
||||
budDescription = "Builds an OCI image using instructions in one or more Dockerfiles."
|
||||
budCommand = cli.Command{
|
||||
Name: "build-using-dockerfile",
|
||||
@@ -82,39 +96,14 @@ func budCmd(c *cli.Context) error {
|
||||
tags = tags[1:]
|
||||
}
|
||||
}
|
||||
registry := DefaultRegistry
|
||||
if c.IsSet("registry") {
|
||||
registry = c.String("registry")
|
||||
}
|
||||
pull := true
|
||||
if c.IsSet("pull") {
|
||||
pull = c.BoolT("pull")
|
||||
}
|
||||
pullAlways := false
|
||||
if c.IsSet("pull-always") {
|
||||
pull = c.Bool("pull-always")
|
||||
}
|
||||
runtimeFlags := []string{}
|
||||
if c.IsSet("runtime-flag") {
|
||||
runtimeFlags = c.StringSlice("runtime-flag")
|
||||
}
|
||||
runtime := ""
|
||||
if c.IsSet("runtime") {
|
||||
runtime = c.String("runtime")
|
||||
}
|
||||
|
||||
pullPolicy := imagebuildah.PullNever
|
||||
if pull {
|
||||
if c.BoolT("pull") {
|
||||
pullPolicy = imagebuildah.PullIfMissing
|
||||
}
|
||||
if pullAlways {
|
||||
if c.Bool("pull-always") {
|
||||
pullPolicy = imagebuildah.PullAlways
|
||||
}
|
||||
|
||||
signaturePolicy := ""
|
||||
if c.IsSet("signature-policy") {
|
||||
signaturePolicy = c.String("signature-policy")
|
||||
}
|
||||
args := make(map[string]string)
|
||||
if c.IsSet("build-arg") {
|
||||
for _, arg := range c.StringSlice("build-arg") {
|
||||
@@ -126,14 +115,8 @@ func budCmd(c *cli.Context) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
quiet := false
|
||||
if c.IsSet("quiet") {
|
||||
quiet = c.Bool("quiet")
|
||||
}
|
||||
dockerfiles := []string{}
|
||||
if c.IsSet("file") || c.IsSet("f") {
|
||||
dockerfiles = c.StringSlice("file")
|
||||
}
|
||||
|
||||
dockerfiles := c.StringSlice("file")
|
||||
format := "oci"
|
||||
if c.IsSet("format") {
|
||||
format = strings.ToLower(c.String("format"))
|
||||
@@ -199,27 +182,40 @@ func budCmd(c *cli.Context) error {
|
||||
if len(dockerfiles) == 0 {
|
||||
dockerfiles = append(dockerfiles, filepath.Join(contextDir, "Dockerfile"))
|
||||
}
|
||||
if err := validateFlags(c, budFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
store, err := getStore(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
systemContext, err := systemContextFromOptions(c)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error building system context")
|
||||
}
|
||||
|
||||
runtimeFlags := []string{}
|
||||
for _, arg := range c.StringSlice("runtime-flag") {
|
||||
runtimeFlags = append(runtimeFlags, "--"+arg)
|
||||
}
|
||||
|
||||
options := imagebuildah.BuildOptions{
|
||||
ContextDirectory: contextDir,
|
||||
PullPolicy: pullPolicy,
|
||||
Registry: registry,
|
||||
Compression: imagebuildah.Gzip,
|
||||
Quiet: quiet,
|
||||
SignaturePolicyPath: signaturePolicy,
|
||||
Quiet: c.Bool("quiet"),
|
||||
SignaturePolicyPath: c.String("signature-policy"),
|
||||
Args: args,
|
||||
Output: output,
|
||||
AdditionalTags: tags,
|
||||
Runtime: runtime,
|
||||
Runtime: c.String("runtime"),
|
||||
RuntimeArgs: runtimeFlags,
|
||||
OutputFormat: format,
|
||||
SystemContext: systemContext,
|
||||
}
|
||||
if !quiet {
|
||||
if !c.Bool("quiet") {
|
||||
options.ReportWriter = os.Stderr
|
||||
}
|
||||
|
||||
|
||||
@@ -10,22 +10,38 @@ import (
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/projectatomic/buildah/util"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
commitFlags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "authfile",
|
||||
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "cert-dir",
|
||||
Value: "",
|
||||
Usage: "use certificates at the specified path to access the registry",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "creds",
|
||||
Value: "",
|
||||
Usage: "use `[username[:password]]` for accessing the registry",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "disable-compression, D",
|
||||
Usage: "don't compress layers",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "signature-policy",
|
||||
Usage: "`pathname` of signature policy file (not usually used)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format, f",
|
||||
Usage: "`format` of the image manifest and metadata",
|
||||
Value: "oci",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "quiet, q",
|
||||
Usage: "don't output progress information when writing images",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "reference-time",
|
||||
@@ -33,8 +49,16 @@ var (
|
||||
Hidden: true,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "quiet, q",
|
||||
Usage: "don't output progress information when writing images",
|
||||
Name: "rm",
|
||||
Usage: "remove the container and its content after committing it to an image. Default leaves the container and its content in place.",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "signature-policy",
|
||||
Usage: "`pathname` of signature policy file (not usually used)",
|
||||
},
|
||||
cli.BoolTFlag{
|
||||
Name: "tls-verify",
|
||||
Usage: "Require HTTPS and verify certificates when accessing the registry",
|
||||
},
|
||||
}
|
||||
commitDescription = "Writes a new image using the container's read-write layer and, if it is based\n on an image, the layers of that image"
|
||||
@@ -62,22 +86,13 @@ func commitCmd(c *cli.Context) error {
|
||||
return errors.Errorf("too many arguments specified")
|
||||
}
|
||||
image := args[0]
|
||||
if err := validateFlags(c, commitFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
signaturePolicy := ""
|
||||
if c.IsSet("signature-policy") {
|
||||
signaturePolicy = c.String("signature-policy")
|
||||
}
|
||||
compress := archive.Uncompressed
|
||||
if !c.IsSet("disable-compression") || !c.Bool("disable-compression") {
|
||||
compress = archive.Gzip
|
||||
}
|
||||
quiet := false
|
||||
if c.IsSet("quiet") {
|
||||
quiet = c.Bool("quiet")
|
||||
}
|
||||
format := "oci"
|
||||
if c.IsSet("format") {
|
||||
format = c.String("format")
|
||||
compress := archive.Gzip
|
||||
if c.Bool("disable-compression") {
|
||||
compress = archive.Uncompressed
|
||||
}
|
||||
timestamp := time.Now().UTC()
|
||||
if c.IsSet("reference-time") {
|
||||
@@ -88,6 +103,8 @@ func commitCmd(c *cli.Context) error {
|
||||
}
|
||||
timestamp = finfo.ModTime().UTC()
|
||||
}
|
||||
|
||||
format := c.String("format")
|
||||
if strings.HasPrefix(strings.ToLower(format), "oci") {
|
||||
format = buildah.OCIv1ImageManifest
|
||||
} else if strings.HasPrefix(strings.ToLower(format), "docker") {
|
||||
@@ -114,19 +131,31 @@ func commitCmd(c *cli.Context) error {
|
||||
dest = dest2
|
||||
}
|
||||
|
||||
systemContext, err := systemContextFromOptions(c)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error building system context")
|
||||
}
|
||||
|
||||
options := buildah.CommitOptions{
|
||||
PreferredManifestType: format,
|
||||
Compression: compress,
|
||||
SignaturePolicyPath: signaturePolicy,
|
||||
SignaturePolicyPath: c.String("signature-policy"),
|
||||
HistoryTimestamp: ×tamp,
|
||||
SystemContext: systemContext,
|
||||
}
|
||||
if !quiet {
|
||||
if !c.Bool("quiet") {
|
||||
options.ReportWriter = os.Stderr
|
||||
}
|
||||
err = builder.Commit(dest, options)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error committing container %q to %q", builder.Container, image)
|
||||
return util.GetFailureCause(
|
||||
err,
|
||||
errors.Wrapf(err, "error committing container %q to %q", builder.Container, image),
|
||||
)
|
||||
}
|
||||
|
||||
if c.Bool("rm") {
|
||||
return builder.Delete()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,13 +1,21 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/urfave/cli"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
var needToShutdownStore = false
|
||||
@@ -58,9 +66,10 @@ func openBuilders(store storage.Store) (builders []*buildah.Builder, err error)
|
||||
return buildah.OpenAllBuilders(store)
|
||||
}
|
||||
|
||||
func openImage(store storage.Store, name string) (builder *buildah.Builder, err error) {
|
||||
func openImage(sc *types.SystemContext, store storage.Store, name string) (builder *buildah.Builder, err error) {
|
||||
options := buildah.ImportFromImageOptions{
|
||||
Image: name,
|
||||
Image: name,
|
||||
SystemContext: sc,
|
||||
}
|
||||
builder, err = buildah.ImportBuilderFromImage(store, options)
|
||||
if err != nil {
|
||||
@@ -71,3 +80,140 @@ func openImage(store storage.Store, name string) (builder *buildah.Builder, err
|
||||
}
|
||||
return builder, nil
|
||||
}
|
||||
|
||||
func getDateAndDigestAndSize(image storage.Image, store storage.Store) (time.Time, string, int64, error) {
|
||||
created := time.Time{}
|
||||
is.Transport.SetStore(store)
|
||||
storeRef, err := is.Transport.ParseStoreReference(store, image.ID)
|
||||
if err != nil {
|
||||
return created, "", -1, err
|
||||
}
|
||||
img, err := storeRef.NewImage(nil)
|
||||
if err != nil {
|
||||
return created, "", -1, err
|
||||
}
|
||||
defer img.Close()
|
||||
imgSize, sizeErr := img.Size()
|
||||
if sizeErr != nil {
|
||||
imgSize = -1
|
||||
}
|
||||
manifest, _, manifestErr := img.Manifest()
|
||||
manifestDigest := ""
|
||||
if manifestErr == nil && len(manifest) > 0 {
|
||||
manifestDigest = digest.Canonical.FromBytes(manifest).String()
|
||||
}
|
||||
inspectInfo, inspectErr := img.Inspect()
|
||||
if inspectErr == nil && inspectInfo != nil {
|
||||
created = inspectInfo.Created
|
||||
}
|
||||
if sizeErr != nil {
|
||||
err = sizeErr
|
||||
} else if manifestErr != nil {
|
||||
err = manifestErr
|
||||
} else if inspectErr != nil {
|
||||
err = inspectErr
|
||||
}
|
||||
return created, manifestDigest, imgSize, err
|
||||
}
|
||||
|
||||
// systemContextFromOptions returns a SystemContext populated with values
|
||||
// per the input parameters provided by the caller for the use in authentication.
|
||||
func systemContextFromOptions(c *cli.Context) (*types.SystemContext, error) {
|
||||
ctx := &types.SystemContext{
|
||||
DockerCertPath: c.String("cert-dir"),
|
||||
}
|
||||
if c.IsSet("tls-verify") {
|
||||
ctx.DockerInsecureSkipTLSVerify = !c.BoolT("tls-verify")
|
||||
}
|
||||
if c.IsSet("creds") {
|
||||
var err error
|
||||
ctx.DockerAuthConfig, err = getDockerAuth(c.String("creds"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if c.IsSet("signature-policy") {
|
||||
ctx.SignaturePolicyPath = c.String("signature-policy")
|
||||
}
|
||||
if c.IsSet("authfile") {
|
||||
ctx.AuthFilePath = c.String("authfile")
|
||||
}
|
||||
if c.GlobalIsSet("registries-conf") {
|
||||
ctx.SystemRegistriesConfPath = c.GlobalString("registries-conf")
|
||||
}
|
||||
if c.GlobalIsSet("registries-conf-dir") {
|
||||
ctx.RegistriesDirPath = c.GlobalString("registries-conf-dir")
|
||||
}
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
func parseCreds(creds string) (string, string) {
|
||||
if creds == "" {
|
||||
return "", ""
|
||||
}
|
||||
up := strings.SplitN(creds, ":", 2)
|
||||
if len(up) == 1 {
|
||||
return up[0], ""
|
||||
}
|
||||
if up[0] == "" {
|
||||
return "", up[1]
|
||||
}
|
||||
return up[0], up[1]
|
||||
}
|
||||
|
||||
func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {
|
||||
username, password := parseCreds(creds)
|
||||
if username == "" {
|
||||
fmt.Print("Username: ")
|
||||
fmt.Scanln(&username)
|
||||
}
|
||||
if password == "" {
|
||||
fmt.Print("Password: ")
|
||||
termPassword, err := terminal.ReadPassword(0)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not read password from terminal")
|
||||
}
|
||||
password = string(termPassword)
|
||||
}
|
||||
|
||||
return &types.DockerAuthConfig{
|
||||
Username: username,
|
||||
Password: password,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// validateFlags searches for StringFlags or StringSlice flags that never had
|
||||
// a value set. This commonly occurs when the CLI mistakenly takes the next
|
||||
// option and uses it as a value.
|
||||
func validateFlags(c *cli.Context, flags []cli.Flag) error {
|
||||
re, err := regexp.Compile("^-.+")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "compiling regex failed")
|
||||
}
|
||||
|
||||
for _, flag := range flags {
|
||||
switch reflect.TypeOf(flag).String() {
|
||||
case "cli.StringSliceFlag":
|
||||
{
|
||||
f := flag.(cli.StringSliceFlag)
|
||||
name := strings.Split(f.Name, ",")
|
||||
val := c.StringSlice(name[0])
|
||||
for _, v := range val {
|
||||
if ok := re.MatchString(v); ok {
|
||||
return errors.Errorf("option --%s requires a value", name[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
case "cli.StringFlag":
|
||||
{
|
||||
f := flag.(cli.StringFlag)
|
||||
name := strings.Split(f.Name, ",")
|
||||
val := c.String(name[0])
|
||||
if ok := re.MatchString(val); ok {
|
||||
return errors.Errorf("option --%s requires a value", name[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
123
cmd/buildah/common_test.go
Normal file
@@ -0,0 +1,123 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
"os/user"
|
||||
"testing"
|
||||
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/storage"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
signaturePolicyPath = ""
|
||||
storeOptions = storage.DefaultStoreOptions
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
flag.StringVar(&signaturePolicyPath, "signature-policy", "", "pathname of signature policy file (not usually used)")
|
||||
options := storage.StoreOptions{}
|
||||
debug := false
|
||||
flag.StringVar(&options.GraphRoot, "root", "", "storage root dir")
|
||||
flag.StringVar(&options.RunRoot, "runroot", "", "storage state dir")
|
||||
flag.StringVar(&options.GraphDriverName, "storage-driver", "", "storage driver")
|
||||
flag.BoolVar(&debug, "debug", false, "turn on debug logging")
|
||||
flag.Parse()
|
||||
if options.GraphRoot != "" || options.RunRoot != "" || options.GraphDriverName != "" {
|
||||
storeOptions = options
|
||||
}
|
||||
if buildah.InitReexec() {
|
||||
return
|
||||
}
|
||||
logrus.SetLevel(logrus.ErrorLevel)
|
||||
if debug {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func TestGetStore(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
globalSet := flag.NewFlagSet("test", 0)
|
||||
globalSet.String("root", "", "path to the directory in which data, including images, is stored")
|
||||
globalSet.String("runroot", "", "path to the directory in which state is stored")
|
||||
globalSet.String("storage-driver", "", "storage driver")
|
||||
globalCtx := cli.NewContext(nil, globalSet, nil)
|
||||
globalCtx.GlobalSet("root", storeOptions.GraphRoot)
|
||||
globalCtx.GlobalSet("runroot", storeOptions.RunRoot)
|
||||
globalCtx.GlobalSet("storage-driver", storeOptions.GraphDriverName)
|
||||
command := cli.Command{Name: "TestGetStore"}
|
||||
c := cli.NewContext(nil, set, globalCtx)
|
||||
c.Command = command
|
||||
|
||||
_, err := getStore(c)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetSize(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
|
||||
_, _, _, err = getDateAndDigestAndSize(images[0], store)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func failTestIfNotRoot(t *testing.T) {
|
||||
u, err := user.Current()
|
||||
if err != nil {
|
||||
t.Log("Could not determine user. Running without root may cause tests to fail")
|
||||
} else if u.Uid != "0" {
|
||||
t.Fatal("tests will fail unless run as root")
|
||||
}
|
||||
}
|
||||
|
||||
func pullTestImage(t *testing.T, imageName string) (string, error) {
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
options := buildah.BuilderOptions{
|
||||
FromImage: imageName,
|
||||
SignaturePolicyPath: signaturePolicyPath,
|
||||
}
|
||||
|
||||
b, err := buildah.NewBuilder(store, options)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id := b.FromImageID
|
||||
err = b.Delete()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
@@ -3,10 +3,10 @@ package main
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/mattn/go-shellwords"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
@@ -18,58 +18,58 @@ const (
|
||||
|
||||
var (
|
||||
configFlags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "author",
|
||||
Usage: "image author contact `information`",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "created-by",
|
||||
Usage: "`description` of how the image was created",
|
||||
Value: DefaultCreatedBy,
|
||||
cli.StringSliceFlag{
|
||||
Name: "annotation, a",
|
||||
Usage: "add `annotation` e.g. annotation=value, for the target image",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "arch",
|
||||
Usage: "`architecture` of the target image",
|
||||
Usage: "set `architecture` of the target image",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "os",
|
||||
Usage: "`operating system` of the target image",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "user, u",
|
||||
Usage: "`user` to run containers based on image as",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "port, p",
|
||||
Usage: "`port` to expose when running containers based on image",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "env, e",
|
||||
Usage: "`environment variable` to set when running containers based on image",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "entrypoint",
|
||||
Usage: "`entry point` for containers based on image",
|
||||
Name: "author",
|
||||
Usage: "set image author contact `information`",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "cmd",
|
||||
Usage: "`command` for containers based on image",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "volume, v",
|
||||
Usage: "`volume` to create for containers based on image",
|
||||
Usage: "sets the default `command` to run for containers based on the image",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "workingdir",
|
||||
Usage: "working `directory` for containers based on image",
|
||||
Name: "created-by",
|
||||
Usage: "add `description` of how the image was created",
|
||||
Value: DefaultCreatedBy,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "entrypoint",
|
||||
Usage: "set `entry point` for containers based on image",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "env, e",
|
||||
Usage: "add `environment variable` to be set when running containers based on image",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "label, l",
|
||||
Usage: "image configuration `label` e.g. label=value",
|
||||
Usage: "add image configuration `label` e.g. label=value",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "os",
|
||||
Usage: "set `operating system` of the target image",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "annotation, a",
|
||||
Usage: "`annotation` e.g. annotation=value, for the target image",
|
||||
Name: "port, p",
|
||||
Usage: "add `port` to expose when running containers based on image",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "user, u",
|
||||
Usage: "set default `user` to run inside containers based on image",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "volume, v",
|
||||
Usage: "add default `volume` path to be created for containers based on image",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "workingdir",
|
||||
Usage: "set working `directory` for containers based on image",
|
||||
},
|
||||
}
|
||||
configDescription = "Modifies the configuration values which will be saved to the image"
|
||||
@@ -171,6 +171,9 @@ func configCmd(c *cli.Context) error {
|
||||
return errors.Errorf("too many arguments specified")
|
||||
}
|
||||
name := args[0]
|
||||
if err := validateFlags(c, configFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
store, err := getStore(c)
|
||||
if err != nil {
|
||||
|
||||
@@ -1,18 +1,66 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
type jsonContainer struct {
|
||||
ID string `json:"id"`
|
||||
Builder bool `json:"builder"`
|
||||
ImageID string `json:"imageid"`
|
||||
ImageName string `json:"imagename"`
|
||||
ContainerName string `json:"containername"`
|
||||
}
|
||||
|
||||
type containerOutputParams struct {
|
||||
ContainerID string
|
||||
Builder string
|
||||
ImageID string
|
||||
ImageName string
|
||||
ContainerName string
|
||||
}
|
||||
|
||||
type containerOptions struct {
|
||||
all bool
|
||||
format string
|
||||
json bool
|
||||
noHeading bool
|
||||
noTruncate bool
|
||||
quiet bool
|
||||
}
|
||||
|
||||
type containerFilterParams struct {
|
||||
id string
|
||||
name string
|
||||
ancestor string
|
||||
}
|
||||
|
||||
var (
|
||||
containersFlags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "quiet, q",
|
||||
Usage: "display only container IDs",
|
||||
Name: "all, a",
|
||||
Usage: "also list non-buildah containers",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "filter, f",
|
||||
Usage: "filter output based on conditions provided",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "pretty-print containers using a Go template",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "json",
|
||||
Usage: "output in JSON format",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "noheading, n",
|
||||
@@ -22,6 +70,10 @@ var (
|
||||
Name: "notruncate",
|
||||
Usage: "do not truncate output",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "quiet, q",
|
||||
Usage: "display only container IDs",
|
||||
},
|
||||
}
|
||||
containersDescription = "Lists containers which appear to be " + buildah.Package + " working containers, their\n names and IDs, and the names and IDs of the images from which they were\n initialized"
|
||||
containersCommand = cli.Command{
|
||||
@@ -35,49 +87,231 @@ var (
|
||||
)
|
||||
|
||||
func containersCmd(c *cli.Context) error {
|
||||
if err := validateFlags(c, containersFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
store, err := getStore(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
quiet := false
|
||||
if c.IsSet("quiet") {
|
||||
quiet = c.Bool("quiet")
|
||||
if c.IsSet("quiet") && c.IsSet("format") {
|
||||
return errors.Errorf("quiet and format are mutually exclusive")
|
||||
}
|
||||
noheading := false
|
||||
if c.IsSet("noheading") {
|
||||
noheading = c.Bool("noheading")
|
||||
|
||||
opts := containerOptions{
|
||||
all: c.Bool("all"),
|
||||
format: c.String("format"),
|
||||
json: c.Bool("json"),
|
||||
noHeading: c.Bool("noheading"),
|
||||
noTruncate: c.Bool("notruncate"),
|
||||
quiet: c.Bool("quiet"),
|
||||
}
|
||||
truncate := true
|
||||
if c.IsSet("notruncate") {
|
||||
truncate = !c.Bool("notruncate")
|
||||
|
||||
var params *containerFilterParams
|
||||
if c.IsSet("filter") {
|
||||
params, err = parseCtrFilter(c.String("filter"))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing filter")
|
||||
}
|
||||
}
|
||||
|
||||
if !opts.noHeading && !opts.quiet && opts.format == "" && !opts.json {
|
||||
containerOutputHeader(!opts.noTruncate)
|
||||
}
|
||||
|
||||
return outputContainers(store, opts, params)
|
||||
}
|
||||
|
||||
func outputContainers(store storage.Store, opts containerOptions, params *containerFilterParams) error {
|
||||
seenImages := make(map[string]string)
|
||||
imageNameForID := func(id string) string {
|
||||
if id == "" {
|
||||
return buildah.BaseImageFakeName
|
||||
}
|
||||
imageName, ok := seenImages[id]
|
||||
if ok {
|
||||
return imageName
|
||||
}
|
||||
img, err2 := store.Image(id)
|
||||
if err2 == nil && len(img.Names) > 0 {
|
||||
seenImages[id] = img.Names[0]
|
||||
}
|
||||
return seenImages[id]
|
||||
}
|
||||
|
||||
builders, err := openBuilders(store)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading build containers")
|
||||
}
|
||||
if len(builders) > 0 && !noheading && !quiet {
|
||||
if truncate {
|
||||
fmt.Printf("%-12s %-12s %-10s %s\n", "CONTAINER ID", "IMAGE ID", "IMAGE NAME", "CONTAINER NAME")
|
||||
} else {
|
||||
fmt.Printf("%-64s %-64s %-10s %s\n", "CONTAINER ID", "IMAGE ID", "IMAGE NAME", "CONTAINER NAME")
|
||||
var (
|
||||
containerOutput []containerOutputParams
|
||||
JSONContainers []jsonContainer
|
||||
)
|
||||
if !opts.all {
|
||||
// only output containers created by buildah
|
||||
for _, builder := range builders {
|
||||
image := imageNameForID(builder.FromImageID)
|
||||
if !matchesCtrFilter(builder.ContainerID, builder.Container, builder.FromImageID, image, params) {
|
||||
continue
|
||||
}
|
||||
if opts.json {
|
||||
JSONContainers = append(JSONContainers, jsonContainer{ID: builder.ContainerID,
|
||||
Builder: true,
|
||||
ImageID: builder.FromImageID,
|
||||
ImageName: image,
|
||||
ContainerName: builder.Container})
|
||||
continue
|
||||
}
|
||||
output := containerOutputParams{
|
||||
ContainerID: builder.ContainerID,
|
||||
Builder: " *",
|
||||
ImageID: builder.FromImageID,
|
||||
ImageName: image,
|
||||
ContainerName: builder.Container,
|
||||
}
|
||||
containerOutput = append(containerOutput, output)
|
||||
}
|
||||
} else {
|
||||
// output all containers currently in storage
|
||||
builderMap := make(map[string]struct{})
|
||||
for _, builder := range builders {
|
||||
builderMap[builder.ContainerID] = struct{}{}
|
||||
}
|
||||
containers, err2 := store.Containers()
|
||||
if err2 != nil {
|
||||
return errors.Wrapf(err2, "error reading list of all containers")
|
||||
}
|
||||
for _, container := range containers {
|
||||
name := ""
|
||||
if len(container.Names) > 0 {
|
||||
name = container.Names[0]
|
||||
}
|
||||
_, ours := builderMap[container.ID]
|
||||
builder := ""
|
||||
if ours {
|
||||
builder = " *"
|
||||
}
|
||||
if !matchesCtrFilter(container.ID, name, container.ImageID, imageNameForID(container.ImageID), params) {
|
||||
continue
|
||||
}
|
||||
if opts.json {
|
||||
JSONContainers = append(JSONContainers, jsonContainer{ID: container.ID,
|
||||
Builder: ours,
|
||||
ImageID: container.ImageID,
|
||||
ImageName: imageNameForID(container.ImageID),
|
||||
ContainerName: name})
|
||||
}
|
||||
output := containerOutputParams{
|
||||
ContainerID: container.ID,
|
||||
Builder: builder,
|
||||
ImageID: container.ImageID,
|
||||
ImageName: imageNameForID(container.ImageID),
|
||||
ContainerName: name,
|
||||
}
|
||||
containerOutput = append(containerOutput, output)
|
||||
}
|
||||
}
|
||||
for _, builder := range builders {
|
||||
if builder.FromImage == "" {
|
||||
builder.FromImage = buildah.BaseImageFakeName
|
||||
}
|
||||
if quiet {
|
||||
fmt.Printf("%s\n", builder.ContainerID)
|
||||
} else {
|
||||
if truncate {
|
||||
fmt.Printf("%-12.12s %-12.12s %-10s %s\n", builder.ContainerID, builder.FromImageID, builder.FromImage, builder.Container)
|
||||
} else {
|
||||
fmt.Printf("%-64s %-64s %-10s %s\n", builder.ContainerID, builder.FromImageID, builder.FromImage, builder.Container)
|
||||
}
|
||||
if opts.json {
|
||||
data, err := json.MarshalIndent(JSONContainers, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("%s\n", data)
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, ctr := range containerOutput {
|
||||
if opts.quiet {
|
||||
fmt.Printf("%-64s\n", ctr.ContainerID)
|
||||
continue
|
||||
}
|
||||
if opts.format != "" {
|
||||
if err := containerOutputUsingTemplate(opts.format, ctr); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
containerOutputUsingFormatString(!opts.noTruncate, ctr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func containerOutputUsingTemplate(format string, params containerOutputParams) error {
|
||||
tmpl, err := template.New("container").Parse(format)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Template parsing error")
|
||||
}
|
||||
|
||||
err = tmpl.Execute(os.Stdout, params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println()
|
||||
return nil
|
||||
}
|
||||
|
||||
func containerOutputUsingFormatString(truncate bool, params containerOutputParams) {
|
||||
if truncate {
|
||||
fmt.Printf("%-12.12s %-8s %-12.12s %-32s %s\n", params.ContainerID, params.Builder, params.ImageID, params.ImageName, params.ContainerName)
|
||||
} else {
|
||||
fmt.Printf("%-64s %-8s %-64s %-32s %s\n", params.ContainerID, params.Builder, params.ImageID, params.ImageName, params.ContainerName)
|
||||
}
|
||||
}
|
||||
|
||||
func containerOutputHeader(truncate bool) {
|
||||
if truncate {
|
||||
fmt.Printf("%-12s %-8s %-12s %-32s %s\n", "CONTAINER ID", "BUILDER", "IMAGE ID", "IMAGE NAME", "CONTAINER NAME")
|
||||
} else {
|
||||
fmt.Printf("%-64s %-8s %-64s %-32s %s\n", "CONTAINER ID", "BUILDER", "IMAGE ID", "IMAGE NAME", "CONTAINER NAME")
|
||||
}
|
||||
}
|
||||
|
||||
func parseCtrFilter(filter string) (*containerFilterParams, error) {
|
||||
params := new(containerFilterParams)
|
||||
filters := strings.Split(filter, ",")
|
||||
for _, param := range filters {
|
||||
pair := strings.SplitN(param, "=", 2)
|
||||
if len(pair) != 2 {
|
||||
return nil, errors.Errorf("incorrect filter value %q, should be of form filter=value", param)
|
||||
}
|
||||
switch strings.TrimSpace(pair[0]) {
|
||||
case "id":
|
||||
params.id = pair[1]
|
||||
case "name":
|
||||
params.name = pair[1]
|
||||
case "ancestor":
|
||||
params.ancestor = pair[1]
|
||||
default:
|
||||
return nil, errors.Errorf("invalid filter %q", pair[0])
|
||||
}
|
||||
}
|
||||
return params, nil
|
||||
}
|
||||
|
||||
func matchesCtrName(ctrName, argName string) bool {
|
||||
return strings.Contains(ctrName, argName)
|
||||
}
|
||||
|
||||
func matchesAncestor(imgName, imgID, argName string) bool {
|
||||
if matchesID(imgID, argName) {
|
||||
return true
|
||||
}
|
||||
return matchesReference(imgName, argName)
|
||||
}
|
||||
|
||||
func matchesCtrFilter(ctrID, ctrName, imgID, imgName string, params *containerFilterParams) bool {
|
||||
if params == nil {
|
||||
return true
|
||||
}
|
||||
if params.id != "" && !matchesID(ctrID, params.id) {
|
||||
return false
|
||||
}
|
||||
if params.name != "" && !matchesCtrName(ctrName, params.name) {
|
||||
return false
|
||||
}
|
||||
if params.ancestor != "" && !matchesAncestor(imgName, imgID, params.ancestor) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -9,15 +9,22 @@ import (
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultRegistry is a prefix that we apply to an image name if we
|
||||
// can't find one in the local Store, in order to generate a source
|
||||
// reference for the image that we can then copy to the local Store.
|
||||
DefaultRegistry = "docker://"
|
||||
)
|
||||
|
||||
var (
|
||||
fromFlags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "authfile",
|
||||
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "cert-dir",
|
||||
Value: "",
|
||||
Usage: "use certificates at the specified path to access the registry",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "creds",
|
||||
Value: "",
|
||||
Usage: "use `[username[:password]]` for accessing the registry",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "name",
|
||||
Usage: "`name` for the working container",
|
||||
@@ -28,20 +35,19 @@ var (
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "pull-always",
|
||||
Usage: "pull the image even if one with the same name is already present",
|
||||
Usage: "pull the image even if named image is present in store (supersedes pull option)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "registry",
|
||||
Usage: "`prefix` to prepend to the image name in order to pull the image",
|
||||
Value: DefaultRegistry,
|
||||
cli.BoolFlag{
|
||||
Name: "quiet, q",
|
||||
Usage: "don't output progress information when pulling images",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "signature-policy",
|
||||
Usage: "`pathname` of signature policy file (not usually used)",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "quiet, q",
|
||||
Usage: "don't output progress information when pulling images",
|
||||
cli.BoolTFlag{
|
||||
Name: "tls-verify",
|
||||
Usage: "require HTTPS and verify certificates when accessing the registry",
|
||||
},
|
||||
}
|
||||
fromDescription = "Creates a new working container, either from scratch or using a specified\n image as a starting point"
|
||||
@@ -57,7 +63,6 @@ var (
|
||||
)
|
||||
|
||||
func fromCmd(c *cli.Context) error {
|
||||
|
||||
args := c.Args()
|
||||
if len(args) == 0 {
|
||||
return errors.Errorf("an image name (or \"scratch\") must be specified")
|
||||
@@ -65,42 +70,24 @@ func fromCmd(c *cli.Context) error {
|
||||
if len(args) > 1 {
|
||||
return errors.Errorf("too many arguments specified")
|
||||
}
|
||||
image := args[0]
|
||||
if err := validateFlags(c, fromFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
registry := DefaultRegistry
|
||||
if c.IsSet("registry") {
|
||||
registry = c.String("registry")
|
||||
}
|
||||
pull := true
|
||||
if c.IsSet("pull") {
|
||||
pull = c.BoolT("pull")
|
||||
}
|
||||
pullAlways := false
|
||||
if c.IsSet("pull-always") {
|
||||
pull = c.Bool("pull-always")
|
||||
systemContext, err := systemContextFromOptions(c)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error building system context")
|
||||
}
|
||||
|
||||
pullPolicy := buildah.PullNever
|
||||
if pull {
|
||||
if c.BoolT("pull") {
|
||||
pullPolicy = buildah.PullIfMissing
|
||||
}
|
||||
if pullAlways {
|
||||
if c.Bool("pull-always") {
|
||||
pullPolicy = buildah.PullAlways
|
||||
}
|
||||
|
||||
name := ""
|
||||
if c.IsSet("name") {
|
||||
name = c.String("name")
|
||||
}
|
||||
signaturePolicy := ""
|
||||
if c.IsSet("signature-policy") {
|
||||
signaturePolicy = c.String("signature-policy")
|
||||
}
|
||||
|
||||
quiet := false
|
||||
if c.IsSet("quiet") {
|
||||
quiet = c.Bool("quiet")
|
||||
}
|
||||
signaturePolicy := c.String("signature-policy")
|
||||
|
||||
store, err := getStore(c)
|
||||
if err != nil {
|
||||
@@ -108,13 +95,14 @@ func fromCmd(c *cli.Context) error {
|
||||
}
|
||||
|
||||
options := buildah.BuilderOptions{
|
||||
FromImage: image,
|
||||
Container: name,
|
||||
PullPolicy: pullPolicy,
|
||||
Registry: registry,
|
||||
SignaturePolicyPath: signaturePolicy,
|
||||
FromImage: args[0],
|
||||
Container: c.String("name"),
|
||||
PullPolicy: pullPolicy,
|
||||
SignaturePolicyPath: signaturePolicy,
|
||||
SystemContext: systemContext,
|
||||
DefaultMountsFilePath: c.GlobalString("default-mounts-file"),
|
||||
}
|
||||
if !quiet {
|
||||
if !c.Bool("quiet") {
|
||||
options.ReportWriter = os.Stderr
|
||||
}
|
||||
|
||||
|
||||
@@ -2,26 +2,76 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"encoding/json"
|
||||
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/storage"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
type jsonImage struct {
|
||||
ID string `json:"id"`
|
||||
Names []string `json:"names"`
|
||||
}
|
||||
|
||||
type imageOutputParams struct {
|
||||
ID string
|
||||
Name string
|
||||
Digest string
|
||||
CreatedAt string
|
||||
Size string
|
||||
}
|
||||
|
||||
type filterParams struct {
|
||||
dangling string
|
||||
label string
|
||||
beforeImage string // Images are sorted by date, so we can just output until we see the image
|
||||
sinceImage string // Images are sorted by date, so we can just output until we don't see the image
|
||||
beforeDate time.Time
|
||||
sinceDate time.Time
|
||||
referencePattern string
|
||||
}
|
||||
|
||||
var (
|
||||
imagesFlags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "quiet, q",
|
||||
Usage: "display only image IDs",
|
||||
Name: "digests",
|
||||
Usage: "show digests",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "filter, f",
|
||||
Usage: "filter output based on conditions provided",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "pretty-print images using a Go template",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "json",
|
||||
Usage: "output in JSON format",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "noheading, n",
|
||||
Usage: "do not print column headings",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "notruncate",
|
||||
Name: "no-trunc, notruncate",
|
||||
Usage: "do not truncate output",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "quiet, q",
|
||||
Usage: "display only image IDs",
|
||||
},
|
||||
}
|
||||
|
||||
imagesDescription = "Lists locally stored images."
|
||||
imagesCommand = cli.Command{
|
||||
Name: "images",
|
||||
@@ -34,52 +84,310 @@ var (
|
||||
)
|
||||
|
||||
func imagesCmd(c *cli.Context) error {
|
||||
if err := validateFlags(c, imagesFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
store, err := getStore(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
quiet := false
|
||||
if c.IsSet("quiet") {
|
||||
quiet = c.Bool("quiet")
|
||||
}
|
||||
noheading := false
|
||||
if c.IsSet("noheading") {
|
||||
noheading = c.Bool("noheading")
|
||||
}
|
||||
truncate := true
|
||||
if c.IsSet("notruncate") {
|
||||
truncate = !c.Bool("notruncate")
|
||||
}
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading images")
|
||||
}
|
||||
|
||||
if len(images) > 0 && !noheading && !quiet {
|
||||
if truncate {
|
||||
fmt.Printf("%-12s %s\n", "IMAGE ID", "IMAGE NAME")
|
||||
} else {
|
||||
fmt.Printf("%-64s %s\n", "IMAGE ID", "IMAGE NAME")
|
||||
}
|
||||
if c.IsSet("quiet") && c.IsSet("format") {
|
||||
return errors.Errorf("quiet and format are mutually exclusive")
|
||||
}
|
||||
for _, image := range images {
|
||||
if quiet {
|
||||
fmt.Printf("%s\n", image.ID)
|
||||
continue
|
||||
|
||||
quiet := c.Bool("quiet")
|
||||
truncate := !c.Bool("no-trunc")
|
||||
digests := c.Bool("digests")
|
||||
hasTemplate := c.IsSet("format")
|
||||
|
||||
name := ""
|
||||
if len(c.Args()) == 1 {
|
||||
name = c.Args().Get(0)
|
||||
} else if len(c.Args()) > 1 {
|
||||
return errors.New("'buildah images' requires at most 1 argument")
|
||||
}
|
||||
if c.IsSet("json") {
|
||||
JSONImages := []jsonImage{}
|
||||
for _, image := range images {
|
||||
JSONImages = append(JSONImages, jsonImage{ID: image.ID, Names: image.Names})
|
||||
}
|
||||
names := []string{""}
|
||||
if len(image.Names) > 0 {
|
||||
names = image.Names
|
||||
data, err2 := json.MarshalIndent(JSONImages, "", " ")
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
for _, name := range names {
|
||||
if truncate {
|
||||
fmt.Printf("%-12.12s %s\n", image.ID, name)
|
||||
} else {
|
||||
fmt.Printf("%-64s %s\n", image.ID, name)
|
||||
}
|
||||
fmt.Printf("%s\n", data)
|
||||
return nil
|
||||
}
|
||||
var params *filterParams
|
||||
if c.IsSet("filter") {
|
||||
params, err = parseFilter(store, images, c.String("filter"))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing filter")
|
||||
}
|
||||
}
|
||||
|
||||
if len(images) > 0 && !c.Bool("noheading") && !quiet && !hasTemplate {
|
||||
outputHeader(truncate, digests)
|
||||
}
|
||||
|
||||
return outputImages(images, c.String("format"), store, params, name, hasTemplate, truncate, digests, quiet)
|
||||
}
|
||||
|
||||
func parseFilter(store storage.Store, images []storage.Image, filter string) (*filterParams, error) {
|
||||
params := new(filterParams)
|
||||
filterStrings := strings.Split(filter, ",")
|
||||
for _, param := range filterStrings {
|
||||
pair := strings.SplitN(param, "=", 2)
|
||||
switch strings.TrimSpace(pair[0]) {
|
||||
case "dangling":
|
||||
if pair[1] == "true" || pair[1] == "false" {
|
||||
params.dangling = pair[1]
|
||||
} else {
|
||||
return nil, fmt.Errorf("invalid filter: '%s=[%s]'", pair[0], pair[1])
|
||||
}
|
||||
case "label":
|
||||
params.label = pair[1]
|
||||
case "before":
|
||||
beforeDate, err := setFilterDate(store, images, pair[1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("no such id: %s", pair[0])
|
||||
}
|
||||
params.beforeDate = beforeDate
|
||||
params.beforeImage = pair[1]
|
||||
case "since":
|
||||
sinceDate, err := setFilterDate(store, images, pair[1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("no such id: %s", pair[0])
|
||||
}
|
||||
params.sinceDate = sinceDate
|
||||
params.sinceImage = pair[1]
|
||||
case "reference":
|
||||
params.referencePattern = pair[1]
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid filter: '%s'", pair[0])
|
||||
}
|
||||
}
|
||||
return params, nil
|
||||
}
|
||||
|
||||
// setFilterDate resolves imgName against the names of the passed-in
// images and returns the matching image's creation time, for use by the
// "before" and "since" filters.  Matching uses matchesReference, so
// repository-suffix (partial) matches are accepted.  Returns an error if
// no image matches or the matching image cannot be inspected.
func setFilterDate(store storage.Store, images []storage.Image, imgName string) (time.Time, error) {
	for _, image := range images {
		for _, name := range image.Names {
			if matchesReference(name, imgName) {
				// Set the date to this image
				ref, err := is.Transport.ParseStoreReference(store, image.ID)
				if err != nil {
					return time.Time{}, fmt.Errorf("error parsing reference to image %q: %v", image.ID, err)
				}
				img, err := ref.NewImage(nil)
				if err != nil {
					return time.Time{}, fmt.Errorf("error reading image %q: %v", image.ID, err)
				}
				// NOTE(review): defer inside a loop — harmless here
				// because every path below returns from the function,
				// so at most one image is ever opened.
				defer img.Close()
				inspect, err := img.Inspect()
				if err != nil {
					return time.Time{}, fmt.Errorf("error inspecting image %q: %v", image.ID, err)
				}
				date := inspect.Created
				return date, nil
			}
		}
	}
	return time.Time{}, fmt.Errorf("Could not locate image %q", imgName)
}
|
||||
|
||||
func outputHeader(truncate, digests bool) {
|
||||
if truncate {
|
||||
fmt.Printf("%-20s %-56s ", "IMAGE ID", "IMAGE NAME")
|
||||
} else {
|
||||
fmt.Printf("%-64s %-56s ", "IMAGE ID", "IMAGE NAME")
|
||||
}
|
||||
|
||||
if digests {
|
||||
fmt.Printf("%-71s ", "DIGEST")
|
||||
}
|
||||
|
||||
fmt.Printf("%-22s %s\n", "CREATED AT", "SIZE")
|
||||
}
|
||||
|
||||
// outputImages prints the given images, applying the name argument and
// any filter constraints, in one of three modes: quiet (IDs only), a
// user-supplied Go template, or the default fixed-width columns.  It
// returns an error only when template rendering fails.
func outputImages(images []storage.Image, format string, store storage.Store, filters *filterParams, argName string, hasTemplate, truncate, digests, quiet bool) error {
	for _, image := range images {
		createdTime := image.Created

		// Prefer the creation time recorded in the image configuration
		// over the store record when the two disagree.
		inspectedTime, digest, size, _ := getDateAndDigestAndSize(image, store)
		if !inspectedTime.IsZero() {
			if createdTime != inspectedTime {
				logrus.Debugf("image record and configuration disagree on the image's creation time for %q, using the one from the configuration", image)
				createdTime = inspectedTime
			}
		}

		names := []string{}
		if len(image.Names) > 0 {
			names = image.Names
		} else {
			// images without names should be printed with "<none>" as the image name
			names = append(names, "<none>")
		}
		for _, name := range names {
			// Skip names that fail the filters or the user's argument.
			if !matchesFilter(image, store, name, filters) || !matchesReference(name, argName) {
				continue
			}
			if quiet {
				fmt.Printf("%-64s\n", image.ID)
				// We only want to print each id once
				break
			}

			params := imageOutputParams{
				ID:        image.ID,
				Name:      name,
				Digest:    digest,
				CreatedAt: createdTime.Format("Jan 2, 2006 15:04"),
				Size:      formattedSize(size),
			}
			if hasTemplate {
				if err := outputUsingTemplate(format, params); err != nil {
					return err
				}
				continue
			}

			outputUsingFormatString(truncate, digests, params)
		}
	}
	return nil
}
|
||||
|
||||
func matchesFilter(image storage.Image, store storage.Store, name string, params *filterParams) bool {
|
||||
if params == nil {
|
||||
return true
|
||||
}
|
||||
if params.dangling != "" && !matchesDangling(name, params.dangling) {
|
||||
return false
|
||||
} else if params.label != "" && !matchesLabel(image, store, params.label) {
|
||||
return false
|
||||
} else if params.beforeImage != "" && !matchesBeforeImage(image, name, params) {
|
||||
return false
|
||||
} else if params.sinceImage != "" && !matchesSinceImage(image, name, params) {
|
||||
return false
|
||||
} else if params.referencePattern != "" && !matchesReference(name, params.referencePattern) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// matchesDangling reports whether an image name satisfies a
// "dangling=true|false" filter.  Dangling images are those without a
// name, which the caller renders as "<none>".
func matchesDangling(name string, dangling string) bool {
	unnamed := name == "<none>"
	switch dangling {
	case "true":
		return unnamed
	case "false":
		return !unnamed
	}
	return false
}
|
||||
|
||||
func matchesLabel(image storage.Image, store storage.Store, label string) bool {
|
||||
storeRef, err := is.Transport.ParseStoreReference(store, image.ID)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
img, err := storeRef.NewImage(nil)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer img.Close()
|
||||
info, err := img.Inspect()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
pair := strings.SplitN(label, "=", 2)
|
||||
for key, value := range info.Labels {
|
||||
if key == pair[0] {
|
||||
if len(pair) == 2 {
|
||||
if value == pair[1] {
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns true if the image was created before the date resolved from
// the "before" filter image; returns false otherwise.  (The previous
// comment said "since" — copied from matchesSinceImage.)  Images with a
// zero (unknown) creation time always match.  The name parameter is
// unused but keeps the signature parallel with the other matchers.
func matchesBeforeImage(image storage.Image, name string, params *filterParams) bool {
	return image.Created.IsZero() || image.Created.Before(params.beforeDate)
}
|
||||
|
||||
// Returns true if the image was created since the filter image. Returns
|
||||
// false otherwise
|
||||
func matchesSinceImage(image storage.Image, name string, params *filterParams) bool {
|
||||
return image.Created.IsZero() || image.Created.After(params.sinceDate)
|
||||
}
|
||||
|
||||
// matchesID reports whether argID is a prefix of the full image ID,
// allowing users to specify abbreviated IDs.
func matchesID(imageID, argID string) bool {
	if len(argID) > len(imageID) {
		return false
	}
	return imageID[:len(argID)] == argID
}
|
||||
|
||||
// matchesReference reports whether a stored image name matches the
// argument the user supplied.  An empty argument matches everything.
// If the argument includes a tag ("repo:tag"), both the repository
// suffix and the tag must match; otherwise only the repository suffix
// is compared.
func matchesReference(name, argName string) bool {
	if argName == "" {
		return true
	}
	splitName := strings.Split(name, ":")
	// If the arg contains a tag, we handle it differently than if it does not
	if strings.Contains(argName, ":") {
		// A stored name without a tag cannot match a tagged argument.
		// Previously this case indexed splitName[1] and panicked.
		if len(splitName) < 2 {
			return false
		}
		splitArg := strings.Split(argName, ":")
		return strings.HasSuffix(splitName[0], splitArg[0]) && (splitName[1] == splitArg[1])
	}
	return strings.HasSuffix(splitName[0], argName)
}
|
||||
|
||||
// formattedSize renders a byte count as a human-readable string using
// binary (1024-based) units, e.g. "97 KB", with up to four significant
// digits.  Values of a petabyte or more stay in TB.
func formattedSize(size int64) string {
	suffixes := [5]string{"B", "KB", "MB", "GB", "TB"}

	value := float64(size)
	idx := 0
	for value >= 1024 && idx < 4 {
		value /= 1024
		idx++
	}
	return fmt.Sprintf("%.4g %s", value, suffixes[idx])
}
|
||||
|
||||
func outputUsingTemplate(format string, params imageOutputParams) error {
|
||||
tmpl, err := template.New("image").Parse(format)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Template parsing error")
|
||||
}
|
||||
|
||||
err = tmpl.Execute(os.Stdout, params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if terminal.IsTerminal(int(os.Stdout.Fd())) {
|
||||
fmt.Println()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func outputUsingFormatString(truncate, digests bool, params imageOutputParams) {
|
||||
if truncate {
|
||||
fmt.Printf("%-20.12s %-56s", params.ID, params.Name)
|
||||
} else {
|
||||
fmt.Printf("%-64s %-56s", params.ID, params.Name)
|
||||
}
|
||||
|
||||
if digests {
|
||||
fmt.Printf(" %-64s", params.Digest)
|
||||
}
|
||||
fmt.Printf(" %-22s %s\n", params.CreatedAt, params.Size)
|
||||
}
|
||||
|
||||
713
cmd/buildah/images_test.go
Normal file
@@ -0,0 +1,713 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/storage"
|
||||
)
|
||||
|
||||
func TestTemplateOutputBlankTemplate(t *testing.T) {
|
||||
params := imageOutputParams{
|
||||
ID: "0123456789abcdef",
|
||||
Name: "test/image:latest",
|
||||
Digest: "sha256:012345789abcdef012345789abcdef012345789abcdef012345789abcdef",
|
||||
CreatedAt: "Jan 01 2016 10:45",
|
||||
Size: "97 KB",
|
||||
}
|
||||
|
||||
err := outputUsingTemplate("", params)
|
||||
//Output: Words
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTemplateOutputValidTemplate(t *testing.T) {
|
||||
params := imageOutputParams{
|
||||
ID: "0123456789abcdef",
|
||||
Name: "test/image:latest",
|
||||
Digest: "sha256:012345789abcdef012345789abcdef012345789abcdef012345789abcdef",
|
||||
CreatedAt: "Jan 01 2016 10:45",
|
||||
Size: "97 KB",
|
||||
}
|
||||
|
||||
templateString := "{{.ID}}"
|
||||
|
||||
output, err := captureOutputWithError(func() error {
|
||||
return outputUsingTemplate(templateString, params)
|
||||
})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if strings.TrimSpace(output) != strings.TrimSpace(params.ID) {
|
||||
t.Errorf("Error with template output:\nExpected: %s\nReceived: %s\n", params.ID, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatStringOutput(t *testing.T) {
|
||||
params := imageOutputParams{
|
||||
ID: "012345789abcdef",
|
||||
Name: "test/image:latest",
|
||||
Digest: "sha256:012345789abcdef012345789abcdef012345789abcdef012345789abcdef",
|
||||
CreatedAt: "Jan 01 2016 10:45",
|
||||
Size: "97 KB",
|
||||
}
|
||||
|
||||
output := captureOutput(func() {
|
||||
outputUsingFormatString(true, true, params)
|
||||
})
|
||||
expectedOutput := fmt.Sprintf("%-20.12s %-56s %-64s %-22s %s\n", params.ID, params.Name, params.Digest, params.CreatedAt, params.Size)
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Error outputting using format string:\n\texpected: %s\n\treceived: %s\n", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSizeFormatting(t *testing.T) {
|
||||
size := formattedSize(0)
|
||||
if size != "0 B" {
|
||||
t.Errorf("Error formatting size: expected '%s' got '%s'", "0 B", size)
|
||||
}
|
||||
|
||||
size = formattedSize(1024)
|
||||
if size != "1 KB" {
|
||||
t.Errorf("Error formatting size: expected '%s' got '%s'", "1 KB", size)
|
||||
}
|
||||
|
||||
size = formattedSize(1024 * 1024 * 1024 * 1024 * 1024)
|
||||
if size != "1024 TB" {
|
||||
t.Errorf("Error formatting size: expected '%s' got '%s'", "1024 TB", size)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOutputHeader(t *testing.T) {
|
||||
output := captureOutput(func() {
|
||||
outputHeader(true, false)
|
||||
})
|
||||
expectedOutput := fmt.Sprintf("%-20s %-56s %-22s %s\n", "IMAGE ID", "IMAGE NAME", "CREATED AT", "SIZE")
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Error outputting header:\n\texpected: %s\n\treceived: %s\n", expectedOutput, output)
|
||||
}
|
||||
|
||||
output = captureOutput(func() {
|
||||
outputHeader(true, true)
|
||||
})
|
||||
expectedOutput = fmt.Sprintf("%-20s %-56s %-71s %-22s %s\n", "IMAGE ID", "IMAGE NAME", "DIGEST", "CREATED AT", "SIZE")
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Error outputting header:\n\texpected: %s\n\treceived: %s\n", expectedOutput, output)
|
||||
}
|
||||
|
||||
output = captureOutput(func() {
|
||||
outputHeader(false, false)
|
||||
})
|
||||
expectedOutput = fmt.Sprintf("%-64s %-56s %-22s %s\n", "IMAGE ID", "IMAGE NAME", "CREATED AT", "SIZE")
|
||||
if output != expectedOutput {
|
||||
t.Errorf("Error outputting header:\n\texpected: %s\n\treceived: %s\n", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchWithTag(t *testing.T) {
|
||||
isMatch := matchesReference("docker.io/kubernetes/pause:latest", "pause:latest")
|
||||
if !isMatch {
|
||||
t.Error("expected match, got not match")
|
||||
}
|
||||
|
||||
isMatch = matchesReference("docker.io/kubernetes/pause:latest", "kubernetes/pause:latest")
|
||||
if !isMatch {
|
||||
t.Error("expected match, got no match")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoMatchesReferenceWithTag(t *testing.T) {
|
||||
isMatch := matchesReference("docker.io/kubernetes/pause:latest", "redis:latest")
|
||||
if isMatch {
|
||||
t.Error("expected no match, got match")
|
||||
}
|
||||
|
||||
isMatch = matchesReference("docker.io/kubernetes/pause:latest", "kubernetes/redis:latest")
|
||||
if isMatch {
|
||||
t.Error("expected no match, got match")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchesReferenceWithoutTag(t *testing.T) {
|
||||
isMatch := matchesReference("docker.io/kubernetes/pause:latest", "pause")
|
||||
if !isMatch {
|
||||
t.Error("expected match, got not match")
|
||||
}
|
||||
|
||||
isMatch = matchesReference("docker.io/kubernetes/pause:latest", "kubernetes/pause")
|
||||
if !isMatch {
|
||||
t.Error("expected match, got no match")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoMatchesReferenceWithoutTag(t *testing.T) {
|
||||
isMatch := matchesReference("docker.io/kubernetes/pause:latest", "redis")
|
||||
if isMatch {
|
||||
t.Error("expected no match, got match")
|
||||
}
|
||||
|
||||
isMatch = matchesReference("docker.io/kubernetes/pause:latest", "kubernetes/redis")
|
||||
if isMatch {
|
||||
t.Error("expected no match, got match")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOutputImagesQuietTruncated(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
// Tests quiet and truncated output
|
||||
output, err := captureOutputWithError(func() error {
|
||||
return outputImages(images[:1], "", store, nil, "", false, true, false, true)
|
||||
})
|
||||
expectedOutput := fmt.Sprintf("%-64s\n", images[0].ID)
|
||||
if err != nil {
|
||||
t.Error("quiet/truncated output produces error")
|
||||
} else if strings.TrimSpace(output) != strings.TrimSpace(expectedOutput) {
|
||||
t.Errorf("quiet/truncated output does not match expected value\nExpected: %s\nReceived: %s\n", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOutputImagesQuietNotTruncated(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
|
||||
// Tests quiet and non-truncated output
|
||||
output, err := captureOutputWithError(func() error {
|
||||
return outputImages(images[:1], "", store, nil, "", false, false, false, true)
|
||||
})
|
||||
expectedOutput := fmt.Sprintf("%-64s\n", images[0].ID)
|
||||
if err != nil {
|
||||
t.Error("quiet/non-truncated output produces error")
|
||||
} else if strings.TrimSpace(output) != strings.TrimSpace(expectedOutput) {
|
||||
t.Errorf("quiet/non-truncated output does not match expected value\nExpected: %s\nReceived: %s\n", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOutputImagesFormatString(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
|
||||
// Tests output with format template
|
||||
output, err := captureOutputWithError(func() error {
|
||||
return outputImages(images[:1], "{{.ID}}", store, nil, "", true, true, false, false)
|
||||
})
|
||||
expectedOutput := images[0].ID
|
||||
if err != nil {
|
||||
t.Error("format string output produces error")
|
||||
} else if strings.TrimSpace(output) != strings.TrimSpace(expectedOutput) {
|
||||
t.Errorf("format string output does not match expected value\nExpected: %s\nReceived: %s\n", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
// TestOutputImagesFormatTemplate is named for template-driven output.
// NOTE(review): the body below is identical to
// TestOutputImagesQuietNotTruncated (empty format, hasTemplate=false,
// quiet=true) and never actually passes a template to outputImages —
// this looks like a copy/paste slip; confirm the intended scenario.
func TestOutputImagesFormatTemplate(t *testing.T) {
	// Make sure the tests are running as root
	failTestIfNotRoot(t)

	store, err := storage.GetStore(storeOptions)
	if err != nil {
		t.Fatal(err)
	} else if store != nil {
		is.Transport.SetStore(store)
	}

	// Pull an image so that we know we have at least one
	_, err = pullTestImage(t, "busybox:latest")
	if err != nil {
		t.Fatalf("could not pull image to remove: %v", err)
	}

	images, err := store.Images()
	if err != nil {
		t.Fatalf("Error reading images: %v", err)
	}

	// Tests quiet and non-truncated output
	output, err := captureOutputWithError(func() error {
		return outputImages(images[:1], "", store, nil, "", false, false, false, true)
	})
	expectedOutput := fmt.Sprintf("%-64s\n", images[0].ID)
	if err != nil {
		t.Error("format template output produces error")
	} else if strings.TrimSpace(output) != strings.TrimSpace(expectedOutput) {
		t.Errorf("format template output does not match expected value\nExpected: %s\nReceived: %s\n", expectedOutput, output)
	}
}
|
||||
|
||||
func TestOutputImagesArgNoMatch(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
|
||||
// Tests output with an arg name that does not match. Args ending in ":" cannot match
|
||||
// because all images in the repository must have a tag, and here the tag is an
|
||||
// empty string
|
||||
output, err := captureOutputWithError(func() error {
|
||||
return outputImages(images[:1], "", store, nil, "foo:", false, true, false, false)
|
||||
})
|
||||
expectedOutput := fmt.Sprintf("")
|
||||
if err != nil {
|
||||
t.Error("arg no match output produces error")
|
||||
} else if strings.TrimSpace(output) != strings.TrimSpace(expectedOutput) {
|
||||
t.Error("arg no match output should be empty")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOutputMultipleImages(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
// Pull two images so that we know we have at least two
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
_, err = pullTestImage(t, "alpine:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
|
||||
// Tests quiet and truncated output
|
||||
output, err := captureOutputWithError(func() error {
|
||||
return outputImages(images[:2], "", store, nil, "", false, true, false, true)
|
||||
})
|
||||
expectedOutput := fmt.Sprintf("%-64s\n%-64s\n", images[0].ID, images[1].ID)
|
||||
if err != nil {
|
||||
t.Error("multi-image output produces error")
|
||||
} else if strings.TrimSpace(output) != strings.TrimSpace(expectedOutput) {
|
||||
t.Errorf("multi-image output does not match expected value\nExpected: %s\nReceived: %s\n", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseFilterAllParams(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
label := "dangling=true,label=a=b,before=busybox:latest,since=busybox:latest,reference=abcdef"
|
||||
params, err := parseFilter(store, images, label)
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing filter: %v", err)
|
||||
}
|
||||
|
||||
ref, err := is.Transport.ParseStoreReference(store, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing store reference: %v", err)
|
||||
}
|
||||
img, err := ref.NewImage(nil)
|
||||
if err != nil {
|
||||
t.Fatalf("error reading image from store: %v", err)
|
||||
}
|
||||
defer img.Close()
|
||||
inspect, err := img.Inspect()
|
||||
if err != nil {
|
||||
t.Fatalf("error inspecting image in store: %v", err)
|
||||
}
|
||||
|
||||
expectedParams := &filterParams{
|
||||
dangling: "true",
|
||||
label: "a=b",
|
||||
beforeImage: "busybox:latest",
|
||||
beforeDate: inspect.Created,
|
||||
sinceImage: "busybox:latest",
|
||||
sinceDate: inspect.Created,
|
||||
referencePattern: "abcdef",
|
||||
}
|
||||
if *params != *expectedParams {
|
||||
t.Errorf("filter did not return expected result\n\tExpected: %v\n\tReceived: %v", expectedParams, params)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseFilterInvalidDangling(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
label := "dangling=NO,label=a=b,before=busybox:latest,since=busybox:latest,reference=abcdef"
|
||||
_, err = parseFilter(store, images, label)
|
||||
if err == nil || err.Error() != "invalid filter: 'dangling=[NO]'" {
|
||||
t.Fatalf("expected error parsing filter")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseFilterInvalidBefore(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
label := "dangling=false,label=a=b,before=:,since=busybox:latest,reference=abcdef"
|
||||
_, err = parseFilter(store, images, label)
|
||||
if err == nil || !strings.Contains(err.Error(), "no such id") {
|
||||
t.Fatalf("expected error parsing filter")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseFilterInvalidSince(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
label := "dangling=false,label=a=b,before=busybox:latest,since=:,reference=abcdef"
|
||||
_, err = parseFilter(store, images, label)
|
||||
if err == nil || !strings.Contains(err.Error(), "no such id") {
|
||||
t.Fatalf("expected error parsing filter")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseFilterInvalidFilter(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
label := "foo=bar"
|
||||
_, err = parseFilter(store, images, label)
|
||||
if err == nil || err.Error() != "invalid filter: 'foo'" {
|
||||
t.Fatalf("expected error parsing filter")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchesDangingTrue(t *testing.T) {
|
||||
if !matchesDangling("<none>", "true") {
|
||||
t.Error("matchesDangling() should return true with dangling=true and name=<none>")
|
||||
}
|
||||
|
||||
if !matchesDangling("hello", "false") {
|
||||
t.Error("matchesDangling() should return true with dangling=false and name='hello'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchesDangingFalse(t *testing.T) {
|
||||
if matchesDangling("hello", "true") {
|
||||
t.Error("matchesDangling() should return false with dangling=true and name=hello")
|
||||
}
|
||||
|
||||
if matchesDangling("<none>", "false") {
|
||||
t.Error("matchesDangling() should return false with dangling=false and name=<none>")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMatchesLabelTrue should verify matchesLabel against an image that
// carries the requested label.
// NOTE(review): still unimplemented — exercising matchesLabel requires
// an image with known labels in the test store, which this suite does
// not yet set up.
func TestMatchesLabelTrue(t *testing.T) {
	//TODO: How do I implement this?
}
|
||||
|
||||
// TestMatchesLabelFalse should verify matchesLabel rejects an image
// without the requested label.
// NOTE(review): still unimplemented — see TestMatchesLabelTrue.
func TestMatchesLabelFalse(t *testing.T) {
	// TODO: How do I implement this?
}
|
||||
|
||||
func TestMatchesBeforeImageTrue(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
|
||||
// by default, params.seenImage is false
|
||||
params := new(filterParams)
|
||||
params.beforeDate = time.Now()
|
||||
params.beforeImage = "foo:bar"
|
||||
if !matchesBeforeImage(images[0], ":", params) {
|
||||
t.Error("should have matched beforeImage")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchesBeforeImageFalse(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
|
||||
// by default, params.seenImage is false
|
||||
params := new(filterParams)
|
||||
params.beforeDate = time.Time{}
|
||||
params.beforeImage = "foo:bar"
|
||||
// Should return false because the image has been seen
|
||||
if matchesBeforeImage(images[0], ":", params) {
|
||||
t.Error("should not have matched beforeImage")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchesSinceeImageTrue(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
|
||||
// by default, params.seenImage is false
|
||||
params := new(filterParams)
|
||||
params.sinceDate = time.Time{}
|
||||
params.sinceImage = "foo:bar"
|
||||
if !matchesSinceImage(images[0], ":", params) {
|
||||
t.Error("should have matched SinceImage")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchesSinceImageFalse(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
// Pull an image so that we know we have at least one
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
|
||||
// by default, params.seenImage is false
|
||||
params := new(filterParams)
|
||||
params.sinceDate = time.Now()
|
||||
params.sinceImage = "foo:bar"
|
||||
// Should return false because the image has been seen
|
||||
if matchesSinceImage(images[0], ":", params) {
|
||||
t.Error("should not have matched sinceImage")
|
||||
}
|
||||
|
||||
if matchesSinceImage(images[0], "foo:bar", params) {
|
||||
t.Error("image should have been filtered out")
|
||||
}
|
||||
}
|
||||
|
||||
func captureOutputWithError(f func() error) (string, error) {
|
||||
old := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
err := f()
|
||||
|
||||
w.Close()
|
||||
os.Stdout = old
|
||||
var buf bytes.Buffer
|
||||
io.Copy(&buf, r)
|
||||
return buf.String(), err
|
||||
}
|
||||
|
||||
// Captures output so that it can be compared to expected values
|
||||
func captureOutput(f func()) string {
|
||||
old := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
f()
|
||||
|
||||
w.Close()
|
||||
os.Stdout = old
|
||||
var buf bytes.Buffer
|
||||
io.Copy(&buf, r)
|
||||
return buf.String()
|
||||
}
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/urfave/cli"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -21,14 +22,15 @@ ID: {{.ContainerID}}
|
||||
|
||||
var (
|
||||
inspectFlags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "type, t",
|
||||
Usage: "look at the item of the specified `type` (container or image) and name",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format, f",
|
||||
Usage: "use `format` as a Go template to format the output",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "type, t",
|
||||
Usage: "look at the item of the specified `type` (container or image) and name",
|
||||
Value: inspectTypeContainer,
|
||||
},
|
||||
}
|
||||
inspectDescription = "Inspects a build container's or built image's configuration."
|
||||
inspectCommand = cli.Command{
|
||||
@@ -51,23 +53,18 @@ func inspectCmd(c *cli.Context) error {
|
||||
if len(args) > 1 {
|
||||
return errors.Errorf("too many arguments specified")
|
||||
}
|
||||
|
||||
itemType := inspectTypeContainer
|
||||
if c.IsSet("type") {
|
||||
itemType = c.String("type")
|
||||
if err := validateFlags(c, inspectFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
switch itemType {
|
||||
case inspectTypeContainer:
|
||||
case inspectTypeImage:
|
||||
default:
|
||||
return errors.Errorf("the only recognized types are %q and %q", inspectTypeContainer, inspectTypeImage)
|
||||
|
||||
systemContext, err := systemContextFromOptions(c)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error building system context")
|
||||
}
|
||||
|
||||
format := defaultFormat
|
||||
if c.IsSet("format") {
|
||||
if c.String("format") != "" {
|
||||
format = c.String("format")
|
||||
}
|
||||
if c.String("format") != "" {
|
||||
format = c.String("format")
|
||||
}
|
||||
t := template.Must(template.New("format").Parse(format))
|
||||
|
||||
@@ -78,27 +75,41 @@ func inspectCmd(c *cli.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
switch itemType {
|
||||
switch c.String("type") {
|
||||
case inspectTypeContainer:
|
||||
builder, err = openBuilder(store, name)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading build container %q", name)
|
||||
if c.IsSet("type") {
|
||||
return errors.Wrapf(err, "error reading build container %q", name)
|
||||
}
|
||||
builder, err = openImage(systemContext, store, name)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading build object %q", name)
|
||||
}
|
||||
}
|
||||
case inspectTypeImage:
|
||||
builder, err = openImage(store, name)
|
||||
builder, err = openImage(systemContext, store, name)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading image %q", name)
|
||||
}
|
||||
default:
|
||||
return errors.Errorf("the only recognized types are %q and %q", inspectTypeContainer, inspectTypeImage)
|
||||
}
|
||||
|
||||
if c.IsSet("format") {
|
||||
return t.Execute(os.Stdout, builder)
|
||||
if err := t.Execute(os.Stdout, buildah.GetBuildInfo(builder)); err != nil {
|
||||
return err
|
||||
}
|
||||
if terminal.IsTerminal(int(os.Stdout.Fd())) {
|
||||
fmt.Println()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
b, err := json.MarshalIndent(builder, "", " ")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error encoding build container as json")
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
if terminal.IsTerminal(int(os.Stdout.Fd())) {
|
||||
enc.SetEscapeHTML(false)
|
||||
}
|
||||
_, err = fmt.Println(string(b))
|
||||
return err
|
||||
return enc.Encode(builder)
|
||||
}
|
||||
|
||||
@@ -4,15 +4,17 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/storage"
|
||||
ispecs "github.com/opencontainers/image-spec/specs-go"
|
||||
rspecs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
debug := false
|
||||
|
||||
var defaultStoreDriverOptions *cli.StringSlice
|
||||
if buildah.InitReexec() {
|
||||
return
|
||||
@@ -27,6 +29,18 @@ func main() {
|
||||
defaultStoreDriverOptions = &optionSlice
|
||||
}
|
||||
app.Flags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "print debugging information",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "registries-conf",
|
||||
Usage: "path to registries.conf file (not usually used)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "registries-conf-dir",
|
||||
Usage: "path to registries.conf.d directory (not usually used)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "root",
|
||||
Usage: "storage root dir",
|
||||
@@ -47,17 +61,17 @@ func main() {
|
||||
Usage: "storage driver option",
|
||||
Value: defaultStoreDriverOptions,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "print debugging information",
|
||||
cli.StringFlag{
|
||||
Name: "default-mounts-file",
|
||||
Usage: "path to default mounts file",
|
||||
Value: buildah.DefaultMountsFile,
|
||||
},
|
||||
}
|
||||
app.Before = func(c *cli.Context) error {
|
||||
logrus.SetLevel(logrus.ErrorLevel)
|
||||
if c.GlobalIsSet("debug") {
|
||||
if c.GlobalBool("debug") {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
if c.GlobalBool("debug") {
|
||||
debug = true
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -88,10 +102,15 @@ func main() {
|
||||
runCommand,
|
||||
tagCommand,
|
||||
umountCommand,
|
||||
versionCommand,
|
||||
}
|
||||
err := app.Run(os.Args)
|
||||
if err != nil {
|
||||
logrus.Errorf("%v", err)
|
||||
os.Exit(1)
|
||||
if debug {
|
||||
logrus.Errorf(err.Error())
|
||||
} else {
|
||||
fmt.Fprintln(os.Stderr, err.Error())
|
||||
}
|
||||
cli.OsExiter(1)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,15 +30,15 @@ func mountCmd(c *cli.Context) error {
|
||||
if len(args) > 1 {
|
||||
return errors.Errorf("too many arguments specified")
|
||||
}
|
||||
if err := validateFlags(c, mountFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
store, err := getStore(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
truncate := true
|
||||
if c.IsSet("notruncate") {
|
||||
truncate = !c.Bool("notruncate")
|
||||
}
|
||||
truncate := !c.Bool("notruncate")
|
||||
|
||||
if len(args) == 1 {
|
||||
name := args[0]
|
||||
@@ -46,7 +46,7 @@ func mountCmd(c *cli.Context) error {
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading build container %q", name)
|
||||
}
|
||||
mountPoint, err := builder.Mount("")
|
||||
mountPoint, err := builder.Mount(builder.MountLabel)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error mounting %q container %q", name, builder.Container)
|
||||
}
|
||||
|
||||
@@ -1,38 +1,76 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/projectatomic/buildah/util"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
pushFlags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "authfile",
|
||||
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "cert-dir",
|
||||
Value: "",
|
||||
Usage: "use certificates at the specified path to access the registry",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "creds",
|
||||
Value: "",
|
||||
Usage: "use `[username[:password]]` for accessing the registry",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "disable-compression, D",
|
||||
Usage: "don't compress layers",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "signature-policy",
|
||||
Usage: "`pathname` of signature policy file (not usually used)",
|
||||
Name: "format, f",
|
||||
Usage: "manifest type (oci, v2s1, or v2s2) to use when saving image using the 'dir:' transport (default is manifest type of source)",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "quiet, q",
|
||||
Usage: "don't output progress information when pushing images",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "signature-policy",
|
||||
Usage: "`pathname` of signature policy file (not usually used)",
|
||||
},
|
||||
cli.BoolTFlag{
|
||||
Name: "tls-verify",
|
||||
Usage: "require HTTPS and verify certificates when accessing the registry",
|
||||
},
|
||||
}
|
||||
pushDescription = "Pushes an image to a specified location."
|
||||
pushCommand = cli.Command{
|
||||
pushDescription = fmt.Sprintf(`
|
||||
Pushes an image to a specified location.
|
||||
|
||||
The Image "DESTINATION" uses a "transport":"details" format.
|
||||
|
||||
Supported transports:
|
||||
%s
|
||||
|
||||
See buildah-push(1) section "DESTINATION" for the expected format
|
||||
`, strings.Join(transports.ListNames(), ", "))
|
||||
|
||||
pushCommand = cli.Command{
|
||||
Name: "push",
|
||||
Usage: "Push an image to a specified location",
|
||||
Usage: "Push an image to a specified destination",
|
||||
Description: pushDescription,
|
||||
Flags: pushFlags,
|
||||
Action: pushCmd,
|
||||
ArgsUsage: "IMAGE [TRANSPORT:]IMAGE",
|
||||
ArgsUsage: "IMAGE DESTINATION",
|
||||
}
|
||||
)
|
||||
|
||||
@@ -41,43 +79,73 @@ func pushCmd(c *cli.Context) error {
|
||||
if len(args) < 2 {
|
||||
return errors.New("source and destination image IDs must be specified")
|
||||
}
|
||||
if err := validateFlags(c, pushFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
src := args[0]
|
||||
destSpec := args[1]
|
||||
|
||||
signaturePolicy := ""
|
||||
if c.IsSet("signature-policy") {
|
||||
signaturePolicy = c.String("signature-policy")
|
||||
}
|
||||
compress := archive.Uncompressed
|
||||
if !c.IsSet("disable-compression") || !c.Bool("disable-compression") {
|
||||
compress = archive.Gzip
|
||||
}
|
||||
quiet := false
|
||||
if c.IsSet("quiet") {
|
||||
quiet = c.Bool("quiet")
|
||||
compress := archive.Gzip
|
||||
if c.Bool("disable-compression") {
|
||||
compress = archive.Uncompressed
|
||||
}
|
||||
|
||||
store, err := getStore(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dest, err := alltransports.ParseImageName(destSpec)
|
||||
// add the docker:// transport to see if they neglected it.
|
||||
if err != nil {
|
||||
return err
|
||||
if strings.Contains(destSpec, "://") {
|
||||
return err
|
||||
}
|
||||
|
||||
destSpec = "docker://" + destSpec
|
||||
dest2, err2 := alltransports.ParseImageName(destSpec)
|
||||
if err2 != nil {
|
||||
return err
|
||||
}
|
||||
dest = dest2
|
||||
}
|
||||
|
||||
systemContext, err := systemContextFromOptions(c)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error building system context")
|
||||
}
|
||||
|
||||
var manifestType string
|
||||
if c.IsSet("format") {
|
||||
switch c.String("format") {
|
||||
case "oci":
|
||||
manifestType = imgspecv1.MediaTypeImageManifest
|
||||
case "v2s1":
|
||||
manifestType = manifest.DockerV2Schema1SignedMediaType
|
||||
case "v2s2", "docker":
|
||||
manifestType = manifest.DockerV2Schema2MediaType
|
||||
default:
|
||||
return fmt.Errorf("unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'", c.String("format"))
|
||||
}
|
||||
}
|
||||
|
||||
options := buildah.PushOptions{
|
||||
Compression: compress,
|
||||
SignaturePolicyPath: signaturePolicy,
|
||||
ManifestType: manifestType,
|
||||
SignaturePolicyPath: c.String("signature-policy"),
|
||||
Store: store,
|
||||
SystemContext: systemContext,
|
||||
}
|
||||
if !quiet {
|
||||
if !c.Bool("quiet") {
|
||||
options.ReportWriter = os.Stderr
|
||||
}
|
||||
|
||||
err = buildah.Push(src, dest, options)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error pushing image %q to %q", src, destSpec)
|
||||
return util.GetFailureCause(
|
||||
err,
|
||||
errors.Wrapf(err, "error pushing image %q to %q", src, destSpec),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -10,19 +11,35 @@ import (
|
||||
|
||||
var (
|
||||
rmDescription = "Removes one or more working containers, unmounting them if necessary"
|
||||
rmCommand = cli.Command{
|
||||
rmFlags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Usage: "remove all containers",
|
||||
},
|
||||
}
|
||||
rmCommand = cli.Command{
|
||||
Name: "rm",
|
||||
Aliases: []string{"delete"},
|
||||
Usage: "Remove one or more working containers",
|
||||
Description: rmDescription,
|
||||
Action: rmCmd,
|
||||
ArgsUsage: "CONTAINER-NAME-OR-ID [...]",
|
||||
Flags: rmFlags,
|
||||
}
|
||||
)
|
||||
|
||||
// writeError writes `lastError` into `w` if not nil and return the next error `err`
|
||||
func writeError(w io.Writer, err error, lastError error) error {
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(w, lastError)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func rmCmd(c *cli.Context) error {
|
||||
delContainerErrStr := "error removing container"
|
||||
args := c.Args()
|
||||
if len(args) == 0 {
|
||||
if len(args) == 0 && !c.Bool("all") {
|
||||
return errors.Errorf("container ID must be specified")
|
||||
}
|
||||
store, err := getStore(c)
|
||||
@@ -30,28 +47,36 @@ func rmCmd(c *cli.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
var e error
|
||||
for _, name := range args {
|
||||
builder, err := openBuilder(store, name)
|
||||
if e == nil {
|
||||
e = err
|
||||
}
|
||||
var lastError error
|
||||
if c.Bool("all") {
|
||||
builders, err := openBuilders(store)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error reading build container %q: %v\n", name, err)
|
||||
continue
|
||||
return errors.Wrapf(err, "error reading build containers")
|
||||
}
|
||||
|
||||
id := builder.ContainerID
|
||||
err = builder.Delete()
|
||||
if e == nil {
|
||||
e = err
|
||||
for _, builder := range builders {
|
||||
id := builder.ContainerID
|
||||
if err = builder.Delete(); err != nil {
|
||||
lastError = writeError(os.Stderr, errors.Wrapf(err, "%s %q", delContainerErrStr, builder.Container), lastError)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("%s\n", id)
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error removing container %q: %v\n", builder.Container, err)
|
||||
continue
|
||||
} else {
|
||||
for _, name := range args {
|
||||
builder, err := openBuilder(store, name)
|
||||
if err != nil {
|
||||
lastError = writeError(os.Stderr, errors.Wrapf(err, "%s %q", delContainerErrStr, name), lastError)
|
||||
continue
|
||||
}
|
||||
id := builder.ContainerID
|
||||
if err = builder.Delete(); err != nil {
|
||||
lastError = writeError(os.Stderr, errors.Wrapf(err, "%s %q", delContainerErrStr, name), lastError)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("%s\n", id)
|
||||
}
|
||||
fmt.Printf("%s\n", id)
|
||||
|
||||
}
|
||||
|
||||
return e
|
||||
return lastError
|
||||
}
|
||||
|
||||
@@ -4,106 +4,312 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/image/storage"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
rmiDescription = "Removes one or more locally stored images."
|
||||
rmiCommand = cli.Command{
|
||||
rmiDescription = "removes one or more locally stored images."
|
||||
rmiFlags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Usage: "remove all images",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "prune, p",
|
||||
Usage: "prune dangling images",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "force, f",
|
||||
Usage: "force removal of the image and any containers using the image",
|
||||
},
|
||||
}
|
||||
rmiCommand = cli.Command{
|
||||
Name: "rmi",
|
||||
Usage: "Removes one or more images from local storage",
|
||||
Usage: "removes one or more images from local storage",
|
||||
Description: rmiDescription,
|
||||
Action: rmiCmd,
|
||||
ArgsUsage: "IMAGE-NAME-OR-ID [...]",
|
||||
Flags: rmiFlags,
|
||||
}
|
||||
)
|
||||
|
||||
func rmiCmd(c *cli.Context) error {
|
||||
force := c.Bool("force")
|
||||
removeAll := c.Bool("all")
|
||||
pruneDangling := c.Bool("prune")
|
||||
|
||||
args := c.Args()
|
||||
if len(args) == 0 {
|
||||
if len(args) == 0 && !removeAll && !pruneDangling {
|
||||
return errors.Errorf("image name or ID must be specified")
|
||||
}
|
||||
if len(args) > 0 && removeAll {
|
||||
return errors.Errorf("when using the --all switch, you may not pass any images names or IDs")
|
||||
}
|
||||
if removeAll && pruneDangling {
|
||||
return errors.Errorf("when using the --all switch, you may not use --prune switch")
|
||||
}
|
||||
|
||||
if err := validateFlags(c, rmiFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
store, err := getStore(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var e error
|
||||
for _, id := range args {
|
||||
// If it's an exact name or ID match with the underlying
|
||||
// storage library's information about the image, then it's
|
||||
// enough.
|
||||
_, err = store.DeleteImage(id, true)
|
||||
imagesToDelete := args[:]
|
||||
var lastError error
|
||||
|
||||
if removeAll {
|
||||
imagesToDelete, err = findAllImages(store)
|
||||
if err != nil {
|
||||
var ref types.ImageReference
|
||||
// If it's looks like a proper image reference, parse
|
||||
// it and check if it corresponds to an image that
|
||||
// actually exists.
|
||||
if ref2, err2 := alltransports.ParseImageName(id); err2 == nil {
|
||||
if img, err3 := ref2.NewImage(nil); err3 == nil {
|
||||
img.Close()
|
||||
ref = ref2
|
||||
} else {
|
||||
logrus.Debugf("error confirming presence of image %q: %v", transports.ImageName(ref2), err3)
|
||||
}
|
||||
} else {
|
||||
logrus.Debugf("error parsing %q as an image reference: %v", id, err2)
|
||||
}
|
||||
if ref == nil {
|
||||
// If it's looks like an image reference that's
|
||||
// relative to our storage, parse it and check
|
||||
// if it corresponds to an image that actually
|
||||
// exists.
|
||||
if ref2, err2 := storage.Transport.ParseStoreReference(store, id); err2 == nil {
|
||||
if img, err3 := ref2.NewImage(nil); err3 == nil {
|
||||
img.Close()
|
||||
ref = ref2
|
||||
} else {
|
||||
logrus.Debugf("error confirming presence of image %q: %v", transports.ImageName(ref2), err3)
|
||||
}
|
||||
} else {
|
||||
logrus.Debugf("error parsing %q as a store reference: %v", id, err2)
|
||||
}
|
||||
}
|
||||
if ref == nil {
|
||||
// If it might be an ID that's relative to our
|
||||
// storage, parse it and check if it
|
||||
// corresponds to an image that actually
|
||||
// exists. This _should_ be redundant, since
|
||||
// we already tried deleting the image using
|
||||
// the ID directly above, but it can't hurt,
|
||||
// either.
|
||||
if ref2, err2 := storage.Transport.ParseStoreReference(store, "@"+id); err2 == nil {
|
||||
if img, err3 := ref2.NewImage(nil); err3 == nil {
|
||||
img.Close()
|
||||
ref = ref2
|
||||
} else {
|
||||
logrus.Debugf("error confirming presence of image %q: %v", transports.ImageName(ref2), err3)
|
||||
}
|
||||
} else {
|
||||
logrus.Debugf("error parsing %q as an image reference: %v", "@"+id, err2)
|
||||
}
|
||||
}
|
||||
if ref != nil {
|
||||
err = ref.DeleteImage(nil)
|
||||
}
|
||||
return err
|
||||
}
|
||||
if e == nil {
|
||||
e = err
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error removing image %q: %v\n", id, err)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("%s\n", id)
|
||||
}
|
||||
|
||||
return e
|
||||
if pruneDangling {
|
||||
imagesToDelete, err = findDanglingImages(store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, id := range imagesToDelete {
|
||||
image, err := getImage(id, store)
|
||||
if err != nil || image == nil {
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(os.Stderr, lastError)
|
||||
}
|
||||
if err == nil {
|
||||
err = storage.ErrNotAnImage
|
||||
}
|
||||
lastError = errors.Wrapf(err, "could not get image %q", id)
|
||||
continue
|
||||
}
|
||||
if image != nil {
|
||||
ctrIDs, err := runningContainers(image, store)
|
||||
if err != nil {
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(os.Stderr, lastError)
|
||||
}
|
||||
lastError = errors.Wrapf(err, "error getting running containers for image %q", id)
|
||||
continue
|
||||
}
|
||||
if len(ctrIDs) > 0 && len(image.Names) <= 1 {
|
||||
if force {
|
||||
err = removeContainers(ctrIDs, store)
|
||||
if err != nil {
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(os.Stderr, lastError)
|
||||
}
|
||||
lastError = errors.Wrapf(err, "error removing containers %v for image %q", ctrIDs, id)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
for _, ctrID := range ctrIDs {
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(os.Stderr, lastError)
|
||||
}
|
||||
lastError = errors.Wrapf(storage.ErrImageUsedByContainer, "Could not remove image %q (must force) - container %q is using its reference image", id, ctrID)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
// If the user supplied an ID, we cannot delete the image if it is referred to by multiple tags
|
||||
if matchesID(image.ID, id) {
|
||||
if len(image.Names) > 1 && !force {
|
||||
return fmt.Errorf("unable to delete %s (must force) - image is referred to in multiple tags", image.ID)
|
||||
}
|
||||
// If it is forced, we have to untag the image so that it can be deleted
|
||||
image.Names = image.Names[:0]
|
||||
} else {
|
||||
name, err2 := untagImage(id, image, store)
|
||||
if err2 != nil {
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(os.Stderr, lastError)
|
||||
}
|
||||
lastError = errors.Wrapf(err2, "error removing tag %q from image %q", id, image.ID)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("untagged: %s\n", name)
|
||||
}
|
||||
|
||||
if len(image.Names) > 0 {
|
||||
continue
|
||||
}
|
||||
id, err := removeImage(image, store)
|
||||
if err != nil {
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(os.Stderr, lastError)
|
||||
}
|
||||
lastError = errors.Wrapf(err, "error removing image %q", image.ID)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("%s\n", id)
|
||||
}
|
||||
}
|
||||
|
||||
return lastError
|
||||
}
|
||||
|
||||
func getImage(id string, store storage.Store) (*storage.Image, error) {
|
||||
var ref types.ImageReference
|
||||
ref, err := properImageRef(id)
|
||||
if err != nil {
|
||||
logrus.Debug(err)
|
||||
}
|
||||
if ref == nil {
|
||||
if ref, err = storageImageRef(store, id); err != nil {
|
||||
logrus.Debug(err)
|
||||
}
|
||||
}
|
||||
if ref == nil {
|
||||
if ref, err = storageImageID(store, id); err != nil {
|
||||
logrus.Debug(err)
|
||||
}
|
||||
}
|
||||
if ref != nil {
|
||||
image, err2 := is.Transport.GetStoreImage(store, ref)
|
||||
if err2 != nil {
|
||||
return nil, errors.Wrapf(err2, "error reading image using reference %q", transports.ImageName(ref))
|
||||
}
|
||||
return image, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func untagImage(imgArg string, image *storage.Image, store storage.Store) (string, error) {
|
||||
newNames := []string{}
|
||||
removedName := ""
|
||||
for _, name := range image.Names {
|
||||
if matchesReference(name, imgArg) {
|
||||
removedName = name
|
||||
continue
|
||||
}
|
||||
newNames = append(newNames, name)
|
||||
}
|
||||
if removedName != "" {
|
||||
if err := store.SetNames(image.ID, newNames); err != nil {
|
||||
return "", errors.Wrapf(err, "error removing name %q from image %q", removedName, image.ID)
|
||||
}
|
||||
}
|
||||
return removedName, nil
|
||||
}
|
||||
|
||||
func removeImage(image *storage.Image, store storage.Store) (string, error) {
|
||||
if _, err := store.DeleteImage(image.ID, true); err != nil {
|
||||
return "", errors.Wrapf(err, "could not remove image %q", image.ID)
|
||||
}
|
||||
return image.ID, nil
|
||||
}
|
||||
|
||||
// Returns a list of running containers associated with the given ImageReference
|
||||
func runningContainers(image *storage.Image, store storage.Store) ([]string, error) {
|
||||
ctrIDs := []string{}
|
||||
containers, err := store.Containers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, ctr := range containers {
|
||||
if ctr.ImageID == image.ID {
|
||||
ctrIDs = append(ctrIDs, ctr.ID)
|
||||
}
|
||||
}
|
||||
return ctrIDs, nil
|
||||
}
|
||||
|
||||
func removeContainers(ctrIDs []string, store storage.Store) error {
|
||||
for _, ctrID := range ctrIDs {
|
||||
if err := store.DeleteContainer(ctrID); err != nil {
|
||||
return errors.Wrapf(err, "could not remove container %q", ctrID)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// If it's looks like a proper image reference, parse it and check if it
|
||||
// corresponds to an image that actually exists.
|
||||
func properImageRef(id string) (types.ImageReference, error) {
|
||||
var err error
|
||||
if ref, err := alltransports.ParseImageName(id); err == nil {
|
||||
if img, err2 := ref.NewImageSource(nil); err2 == nil {
|
||||
img.Close()
|
||||
return ref, nil
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error confirming presence of image reference %q", transports.ImageName(ref))
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error parsing %q as an image reference", id)
|
||||
}
|
||||
|
||||
// If it's looks like an image reference that's relative to our storage, parse
|
||||
// it and check if it corresponds to an image that actually exists.
|
||||
func storageImageRef(store storage.Store, id string) (types.ImageReference, error) {
|
||||
var err error
|
||||
if ref, err := is.Transport.ParseStoreReference(store, id); err == nil {
|
||||
if img, err2 := ref.NewImageSource(nil); err2 == nil {
|
||||
img.Close()
|
||||
return ref, nil
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error confirming presence of storage image reference %q", transports.ImageName(ref))
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error parsing %q as a storage image reference", id)
|
||||
}
|
||||
|
||||
// If it might be an ID that's relative to our storage, truncated or not, so
|
||||
// parse it and check if it corresponds to an image that we have stored
|
||||
// locally.
|
||||
func storageImageID(store storage.Store, id string) (types.ImageReference, error) {
|
||||
var err error
|
||||
imageID := id
|
||||
if img, err := store.Image(id); err == nil && img != nil {
|
||||
imageID = img.ID
|
||||
}
|
||||
if ref, err := is.Transport.ParseStoreReference(store, imageID); err == nil {
|
||||
if img, err2 := ref.NewImageSource(nil); err2 == nil {
|
||||
img.Close()
|
||||
return ref, nil
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error confirming presence of storage image reference %q", transports.ImageName(ref))
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error parsing %q as a storage image reference: %v", id)
|
||||
}
|
||||
|
||||
// Returns a list of all existing images
|
||||
func findAllImages(store storage.Store) ([]string, error) {
|
||||
imagesToDelete := []string{}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading images")
|
||||
}
|
||||
for _, image := range images {
|
||||
imagesToDelete = append(imagesToDelete, image.ID)
|
||||
}
|
||||
|
||||
return imagesToDelete, nil
|
||||
}
|
||||
|
||||
// Returns a list of all dangling images
|
||||
func findDanglingImages(store storage.Store) ([]string, error) {
|
||||
imagesToDelete := []string{}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading images")
|
||||
}
|
||||
for _, image := range images {
|
||||
if len(image.Names) == 0 {
|
||||
imagesToDelete = append(imagesToDelete, image.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return imagesToDelete, nil
|
||||
}
|
||||
|
||||
141
cmd/buildah/rmi_test.go
Normal file
@@ -0,0 +1,141 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/storage"
|
||||
)
|
||||
|
||||
func TestProperImageRefTrue(t *testing.T) {
|
||||
// Pull an image so we know we have it
|
||||
_, err := pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove")
|
||||
}
|
||||
// This should match a url path
|
||||
imgRef, err := properImageRef("docker://busybox:latest")
|
||||
if err != nil {
|
||||
t.Errorf("could not match image: %v", err)
|
||||
} else if imgRef == nil {
|
||||
t.Error("Returned nil Image Reference")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProperImageRefFalse(t *testing.T) {
|
||||
// Pull an image so we know we have it
|
||||
_, err := pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatal("could not pull image to remove")
|
||||
}
|
||||
// This should match a url path
|
||||
imgRef, _ := properImageRef("docker://:")
|
||||
if imgRef != nil {
|
||||
t.Error("should not have found an Image Reference")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorageImageRefTrue(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("could not get store: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
imgRef, err := storageImageRef(store, "busybox")
|
||||
if err != nil {
|
||||
t.Errorf("could not match image: %v", err)
|
||||
} else if imgRef == nil {
|
||||
t.Error("Returned nil Image Reference")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorageImageRefFalse(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("could not get store: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
imgRef, _ := storageImageRef(store, "")
|
||||
if imgRef != nil {
|
||||
t.Error("should not have found an Image Reference")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorageImageIDTrue(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("could not get store: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
_, err = pullTestImage(t, "busybox:latest")
|
||||
if err != nil {
|
||||
t.Fatalf("could not pull image to remove: %v", err)
|
||||
}
|
||||
//Somehow I have to get the id of the image I just pulled
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading images: %v", err)
|
||||
}
|
||||
id, err := captureOutputWithError(func() error {
|
||||
return outputImages(images, "", store, nil, "busybox:latest", false, false, false, true)
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Error getting id of image: %v", err)
|
||||
}
|
||||
id = strings.TrimSpace(id)
|
||||
|
||||
imgRef, err := storageImageID(store, id)
|
||||
if err != nil {
|
||||
t.Errorf("could not match image: %v", err)
|
||||
} else if imgRef == nil {
|
||||
t.Error("Returned nil Image Reference")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorageImageIDFalse(t *testing.T) {
|
||||
// Make sure the tests are running as root
|
||||
failTestIfNotRoot(t)
|
||||
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if store != nil {
|
||||
is.Transport.SetStore(store)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("could not get store: %v", err)
|
||||
}
|
||||
// Pull an image so we know we have it
|
||||
|
||||
id := ""
|
||||
|
||||
imgRef, _ := storageImageID(store, id)
|
||||
if imgRef != nil {
|
||||
t.Error("should not have returned Image Reference")
|
||||
}
|
||||
}
|
||||
@@ -6,15 +6,19 @@ import (
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
runFlags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "hostname",
|
||||
Usage: "set the hostname inside of the container",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "runtime",
|
||||
Usage: "`path` to an alternate runtime",
|
||||
@@ -24,6 +28,14 @@ var (
|
||||
Name: "runtime-flag",
|
||||
Usage: "add global flags for the container runtime",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "security-opt",
|
||||
Usage: "security Options (default [])",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "tty",
|
||||
Usage: "allocate a pseudo-TTY in the container",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "volume, v",
|
||||
Usage: "bind mount a host location into the container while running the command",
|
||||
@@ -46,19 +58,13 @@ func runCmd(c *cli.Context) error {
|
||||
return errors.Errorf("container ID must be specified")
|
||||
}
|
||||
name := args[0]
|
||||
args = args.Tail()
|
||||
if err := validateFlags(c, runFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
runtime := ""
|
||||
if c.IsSet("runtime") {
|
||||
runtime = c.String("runtime")
|
||||
}
|
||||
flags := []string{}
|
||||
if c.IsSet("runtime-flag") {
|
||||
flags = c.StringSlice("runtime-flag")
|
||||
}
|
||||
volumes := []string{}
|
||||
if c.IsSet("v") || c.IsSet("volume") {
|
||||
volumes = c.StringSlice("volume")
|
||||
args = args.Tail()
|
||||
if len(args) > 0 && args[0] == "--" {
|
||||
args = args[1:]
|
||||
}
|
||||
|
||||
store, err := getStore(c)
|
||||
@@ -71,16 +77,26 @@ func runCmd(c *cli.Context) error {
|
||||
return errors.Wrapf(err, "error reading build container %q", name)
|
||||
}
|
||||
|
||||
hostname := ""
|
||||
if c.IsSet("hostname") {
|
||||
hostname = c.String("hostname")
|
||||
runtimeFlags := []string{}
|
||||
for _, arg := range c.StringSlice("runtime-flag") {
|
||||
runtimeFlags = append(runtimeFlags, "--"+arg)
|
||||
}
|
||||
|
||||
options := buildah.RunOptions{
|
||||
Hostname: hostname,
|
||||
Runtime: runtime,
|
||||
Args: flags,
|
||||
Hostname: c.String("hostname"),
|
||||
Runtime: c.String("runtime"),
|
||||
Args: runtimeFlags,
|
||||
}
|
||||
for _, volumeSpec := range volumes {
|
||||
|
||||
if c.IsSet("tty") {
|
||||
if c.Bool("tty") {
|
||||
options.Terminal = buildah.WithTerminal
|
||||
} else {
|
||||
options.Terminal = buildah.WithoutTerminal
|
||||
}
|
||||
}
|
||||
|
||||
for _, volumeSpec := range c.StringSlice("volume") {
|
||||
volSpec := strings.Split(volumeSpec, ":")
|
||||
if len(volSpec) >= 2 {
|
||||
mountOptions := "bind"
|
||||
|
||||
48
cmd/buildah/version.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
ispecs "github.com/opencontainers/image-spec/specs-go"
|
||||
rspecs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
//Overwritten at build time
|
||||
var (
|
||||
gitCommit string
|
||||
buildInfo string
|
||||
)
|
||||
|
||||
//Function to get and print info for version command
|
||||
func versionCmd(c *cli.Context) error {
|
||||
|
||||
//converting unix time from string to int64
|
||||
buildTime, err := strconv.ParseInt(buildInfo, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Version: ", buildah.Version)
|
||||
fmt.Println("Go Version: ", runtime.Version())
|
||||
fmt.Println("Image Spec: ", ispecs.Version)
|
||||
fmt.Println("Runtime Spec: ", rspecs.Version)
|
||||
fmt.Println("Git Commit: ", gitCommit)
|
||||
|
||||
//Prints out the build time in readable format
|
||||
fmt.Println("Built: ", time.Unix(buildTime, 0).Format(time.ANSIC))
|
||||
fmt.Println("OS/Arch: ", runtime.GOOS+"/"+runtime.GOARCH)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//cli command to print out the version info of buildah
|
||||
var versionCommand = cli.Command{
|
||||
Name: "version",
|
||||
Usage: "Display the Buildah Version Information",
|
||||
Action: versionCmd,
|
||||
}
|
||||
231
commit.go
@@ -2,10 +2,11 @@ package buildah
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
cp "github.com/containers/image/copy"
|
||||
"github.com/containers/image/signature"
|
||||
is "github.com/containers/image/storage"
|
||||
@@ -13,21 +14,10 @@ import (
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah/util"
|
||||
)
|
||||
|
||||
var (
|
||||
// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (just 1024 zero bytes). This
|
||||
// comes from github.com/docker/distribution/manifest/schema1/config_builder.go by way of
|
||||
// github.com/containers/image/image/docker_schema2.go; there is a non-zero embedded timestamp; we could
|
||||
// zero that, but that would just waste storage space in registries, so let’s use the same values.
|
||||
gzippedEmptyLayer = []byte{
|
||||
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
|
||||
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
|
||||
}
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// CommitOptions can be used to alter how an image is committed.
|
||||
@@ -55,6 +45,9 @@ type CommitOptions struct {
|
||||
// HistoryTimestamp is the timestamp used when creating new items in the
|
||||
// image's history. If unset, the current time will be used.
|
||||
HistoryTimestamp *time.Time
|
||||
// github.com/containers/image/types SystemContext to hold credentials
|
||||
// and other authentication/authorization information.
|
||||
SystemContext *types.SystemContext
|
||||
}
|
||||
|
||||
// PushOptions can be used to alter how an image is copied somewhere.
|
||||
@@ -74,6 +67,12 @@ type PushOptions struct {
|
||||
ReportWriter io.Writer
|
||||
// Store is the local storage store which holds the source image.
|
||||
Store storage.Store
|
||||
// github.com/containers/image/types SystemContext to hold credentials
|
||||
// and other authentication/authorization information.
|
||||
SystemContext *types.SystemContext
|
||||
// ManifestType is the format to use when saving the imge using the 'dir' transport
|
||||
// possible options are oci, v2s1, and v2s2
|
||||
ManifestType string
|
||||
}
|
||||
|
||||
// shallowCopy copies the most recent layer, the configuration, and the manifest from one image to another.
|
||||
@@ -81,41 +80,50 @@ type PushOptions struct {
|
||||
// almost any other destination has higher expectations.
|
||||
// We assume that "dest" is a reference to a local image (specifically, a containers/image/storage.storageReference),
|
||||
// and will fail if it isn't.
|
||||
func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReference, systemContext *types.SystemContext) error {
|
||||
func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReference, systemContext *types.SystemContext, compression archive.Compression) error {
|
||||
var names []string
|
||||
// Read the target image name.
|
||||
if dest.DockerReference() == nil {
|
||||
return errors.New("can't write to an unnamed image")
|
||||
if dest.DockerReference() != nil {
|
||||
names = []string{dest.DockerReference().String()}
|
||||
}
|
||||
names, err := util.ExpandTags([]string{dest.DockerReference().String()})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Make a temporary image reference.
|
||||
tmpName := stringid.GenerateRandomID() + "-tmp-" + Package + "-commit"
|
||||
tmpRef, err := is.Transport.ParseStoreReference(b.store, tmpName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err2 := tmpRef.DeleteImage(systemContext); err2 != nil {
|
||||
logrus.Debugf("error deleting temporary image %q: %v", tmpName, err2)
|
||||
}
|
||||
}()
|
||||
// Open the source for reading and a temporary image for writing.
|
||||
// Open the source for reading and the new image for writing.
|
||||
srcImage, err := src.NewImage(systemContext)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading configuration to write to image %q", transports.ImageName(dest))
|
||||
}
|
||||
defer srcImage.Close()
|
||||
tmpImage, err := tmpRef.NewImageDestination(systemContext)
|
||||
destImage, err := dest.NewImageDestination(systemContext)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error opening temporary copy of image %q for writing", transports.ImageName(dest))
|
||||
return errors.Wrapf(err, "error opening image %q for writing", transports.ImageName(dest))
|
||||
}
|
||||
defer tmpImage.Close()
|
||||
// Write an empty filesystem layer, because the image layer requires at least one.
|
||||
_, err = tmpImage.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Size: int64(len(gzippedEmptyLayer))})
|
||||
// Look up the container's read-write layer.
|
||||
container, err := b.store.Container(b.ContainerID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error writing dummy layer for image %q", transports.ImageName(dest))
|
||||
return errors.Wrapf(err, "error reading information about working container %q", b.ContainerID)
|
||||
}
|
||||
// Extract the read-write layer's contents, using whatever compression the container image used to
|
||||
// calculate the blob sum in the manifest.
|
||||
switch compression {
|
||||
case archive.Gzip:
|
||||
logrus.Debugf("extracting layer %q with gzip", container.LayerID)
|
||||
case archive.Bzip2:
|
||||
// Until the image specs define a media type for bzip2-compressed layers, even if we know
|
||||
// how to decompress them, we can't try to compress layers with bzip2.
|
||||
return errors.Wrapf(syscall.ENOTSUP, "media type for bzip2-compressed layers is not defined")
|
||||
default:
|
||||
logrus.Debugf("extracting layer %q with unknown compressor(?)", container.LayerID)
|
||||
}
|
||||
diffOptions := &storage.DiffOptions{
|
||||
Compression: &compression,
|
||||
}
|
||||
layerDiff, err := b.store.Diff("", container.LayerID, diffOptions)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading layer %q from source image %q", container.LayerID, transports.ImageName(src))
|
||||
}
|
||||
defer layerDiff.Close()
|
||||
// Write a copy of the layer as a blob, for the new image to reference.
|
||||
if _, err = destImage.PutBlob(layerDiff, types.BlobInfo{Digest: "", Size: -1}); err != nil {
|
||||
return errors.Wrapf(err, "error creating new read-only layer from container %q", b.ContainerID)
|
||||
}
|
||||
// Read the newly-generated configuration blob.
|
||||
config, err := srcImage.ConfigBlob()
|
||||
@@ -126,106 +134,45 @@ func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReferenc
|
||||
return errors.Errorf("error reading new configuration for image %q: it's empty", transports.ImageName(dest))
|
||||
}
|
||||
logrus.Debugf("read configuration blob %q", string(config))
|
||||
// Write the configuration to the temporary image.
|
||||
// Write the configuration to the new image.
|
||||
configBlobInfo := types.BlobInfo{
|
||||
Digest: digest.Canonical.FromBytes(config),
|
||||
Size: int64(len(config)),
|
||||
}
|
||||
_, err = tmpImage.PutBlob(bytes.NewReader(config), configBlobInfo)
|
||||
if err != nil && len(config) > 0 {
|
||||
if _, err = destImage.PutBlob(bytes.NewReader(config), configBlobInfo); err != nil {
|
||||
return errors.Wrapf(err, "error writing image configuration for temporary copy of %q", transports.ImageName(dest))
|
||||
}
|
||||
// Read the newly-generated, mostly fake, manifest.
|
||||
// Read the newly-generated manifest, which already contains a layer entry for the read-write layer.
|
||||
manifest, _, err := srcImage.Manifest()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading new manifest for image %q", transports.ImageName(dest))
|
||||
}
|
||||
// Write the manifest to the temporary image.
|
||||
err = tmpImage.PutManifest(manifest)
|
||||
// Write the manifest to the new image.
|
||||
err = destImage.PutManifest(manifest)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error writing new manifest to temporary copy of image %q", transports.ImageName(dest))
|
||||
return errors.Wrapf(err, "error writing new manifest to image %q", transports.ImageName(dest))
|
||||
}
|
||||
// Save the temporary image.
|
||||
err = tmpImage.Commit()
|
||||
// Save the new image.
|
||||
err = destImage.Commit()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error committing new image %q", transports.ImageName(dest))
|
||||
}
|
||||
// Locate the temporary image in the lower-level API. Read its item names.
|
||||
tmpImg, err := is.Transport.GetStoreImage(b.store, tmpRef)
|
||||
err = destImage.Close()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error locating temporary image %q", transports.ImageName(dest))
|
||||
return errors.Wrapf(err, "error closing new image %q", transports.ImageName(dest))
|
||||
}
|
||||
items, err := b.store.ListImageBigData(tmpImg.ID)
|
||||
image, err := is.Transport.GetStoreImage(b.store, dest)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading list of named data for image %q", tmpImg.ID)
|
||||
return errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
|
||||
}
|
||||
// Look up the container's read-write layer.
|
||||
container, err := b.store.Container(b.ContainerID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading information about working container %q", b.ContainerID)
|
||||
}
|
||||
parentLayer := ""
|
||||
// Look up the container's source image's layer, if there is a source image.
|
||||
if container.ImageID != "" {
|
||||
img, err2 := b.store.Image(container.ImageID)
|
||||
if err2 != nil {
|
||||
return errors.Wrapf(err2, "error reading information about working container %q's source image", b.ContainerID)
|
||||
}
|
||||
parentLayer = img.TopLayer
|
||||
}
|
||||
// Extract the read-write layer's contents.
|
||||
layerDiff, err := b.store.Diff(parentLayer, container.LayerID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading layer from source image %q", transports.ImageName(src))
|
||||
}
|
||||
defer layerDiff.Close()
|
||||
// Write a copy of the layer for the new image to reference.
|
||||
layer, _, err := b.store.PutLayer("", parentLayer, []string{}, "", false, layerDiff)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating new read-only layer from container %q", b.ContainerID)
|
||||
}
|
||||
// Create a low-level image record that uses the new layer.
|
||||
image, err := b.store.CreateImage("", []string{}, layer.ID, "", nil)
|
||||
if err != nil {
|
||||
err2 := b.store.DeleteLayer(layer.ID)
|
||||
if err2 != nil {
|
||||
logrus.Debugf("error removing layer %q: %v", layer, err2)
|
||||
}
|
||||
return errors.Wrapf(err, "error creating new low-level image %q", transports.ImageName(dest))
|
||||
}
|
||||
logrus.Debugf("created image ID %q", image.ID)
|
||||
defer func() {
|
||||
// Add the target name(s) to the new image.
|
||||
if len(names) > 0 {
|
||||
err = util.AddImageNames(b.store, image, names)
|
||||
if err != nil {
|
||||
_, err2 := b.store.DeleteImage(image.ID, true)
|
||||
if err2 != nil {
|
||||
logrus.Debugf("error removing image %q: %v", image.ID, err2)
|
||||
}
|
||||
return errors.Wrapf(err, "error assigning names %v to new image", names)
|
||||
}
|
||||
}()
|
||||
// Copy the configuration and manifest, which are big data items, along with whatever else is there.
|
||||
for _, item := range items {
|
||||
var data []byte
|
||||
data, err = b.store.ImageBigData(tmpImg.ID, item)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error copying data item %q", item)
|
||||
}
|
||||
err = b.store.SetImageBigData(image.ID, item, data)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error copying data item %q", item)
|
||||
}
|
||||
logrus.Debugf("copied data item %q to %q", item, image.ID)
|
||||
logrus.Debugf("assigned names %v to image %q", names, image.ID)
|
||||
}
|
||||
// Set low-level metadata in the new image so that the image library will accept it as a real image.
|
||||
err = b.store.SetMetadata(image.ID, "{}")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error assigning metadata to new image %q", transports.ImageName(dest))
|
||||
}
|
||||
// Move the target name(s) from the temporary image to the new image.
|
||||
err = util.AddImageNames(b.store, image, names)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error assigning names %v to new image", names)
|
||||
}
|
||||
logrus.Debugf("assigned names %v to image %q", names, image.ID)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -233,30 +180,35 @@ func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReferenc
|
||||
// configuration, to a new image in the specified location, and if we know how,
|
||||
// add any additional tags that were specified.
|
||||
func (b *Builder) Commit(dest types.ImageReference, options CommitOptions) error {
|
||||
policy, err := signature.DefaultPolicy(getSystemContext(options.SignaturePolicyPath))
|
||||
policy, err := signature.DefaultPolicy(getSystemContext(options.SystemContext, options.SignaturePolicyPath))
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "error obtaining default signature policy")
|
||||
}
|
||||
policyContext, err := signature.NewPolicyContext(policy)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "error creating new signature policy context")
|
||||
}
|
||||
defer func() {
|
||||
if err2 := policyContext.Destroy(); err2 != nil {
|
||||
logrus.Debugf("error destroying signature policy context: %v", err2)
|
||||
}
|
||||
}()
|
||||
// Check if we're keeping everything in local storage. If so, we can take certain shortcuts.
|
||||
_, destIsStorage := dest.Transport().(is.StoreTransport)
|
||||
exporting := !destIsStorage
|
||||
src, err := b.makeContainerImageRef(options.PreferredManifestType, exporting, options.Compression, options.HistoryTimestamp)
|
||||
src, err := b.makeImageRef(options.PreferredManifestType, exporting, options.Compression, options.HistoryTimestamp)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error computing layer digests and building metadata")
|
||||
}
|
||||
if exporting {
|
||||
// Copy everything.
|
||||
err = cp.Image(policyContext, dest, src, getCopyOptions(options.ReportWriter))
|
||||
err = cp.Image(policyContext, dest, src, getCopyOptions(options.ReportWriter, nil, options.SystemContext, ""))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error copying layers and metadata")
|
||||
}
|
||||
} else {
|
||||
// Copy only the most recent layer, the configuration, and the manifest.
|
||||
err = b.shallowCopy(dest, src, getSystemContext(options.SignaturePolicyPath))
|
||||
err = b.shallowCopy(dest, src, getSystemContext(options.SystemContext, options.SignaturePolicyPath), options.Compression)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error copying layer and metadata")
|
||||
}
|
||||
@@ -282,44 +234,27 @@ func (b *Builder) Commit(dest types.ImageReference, options CommitOptions) error
|
||||
|
||||
// Push copies the contents of the image to a new location.
|
||||
func Push(image string, dest types.ImageReference, options PushOptions) error {
|
||||
systemContext := getSystemContext(options.SignaturePolicyPath)
|
||||
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
|
||||
policy, err := signature.DefaultPolicy(systemContext)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "error obtaining default signature policy")
|
||||
}
|
||||
policyContext, err := signature.NewPolicyContext(policy)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "error creating new signature policy context")
|
||||
}
|
||||
importOptions := ImportFromImageOptions{
|
||||
Image: image,
|
||||
SignaturePolicyPath: options.SignaturePolicyPath,
|
||||
}
|
||||
builder, err := importBuilderFromImage(options.Store, importOptions)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error importing builder information from image")
|
||||
}
|
||||
// Look up the image name and its layer.
|
||||
ref, err := is.Transport.ParseStoreReference(options.Store, image)
|
||||
// Look up the image.
|
||||
src, err := is.Transport.ParseStoreReference(options.Store, image)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing reference to image %q", image)
|
||||
}
|
||||
img, err := is.Transport.GetStoreImage(options.Store, ref)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error locating image %q", image)
|
||||
}
|
||||
// Give the image we're producing the same ancestors as its source image.
|
||||
builder.FromImage = builder.Docker.ContainerConfig.Image
|
||||
builder.FromImageID = string(builder.Docker.Parent)
|
||||
// Prep the layers and manifest for export.
|
||||
src, err := builder.makeImageImageRef(options.Compression, img.Names, img.TopLayer, nil)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error recomputing layer digests and building metadata")
|
||||
}
|
||||
// Copy everything.
|
||||
err = cp.Image(policyContext, dest, src, getCopyOptions(options.ReportWriter))
|
||||
err = cp.Image(policyContext, dest, src, getCopyOptions(options.ReportWriter, nil, options.SystemContext, options.ManifestType))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error copying layers and metadata")
|
||||
}
|
||||
if options.ReportWriter != nil {
|
||||
fmt.Fprintf(options.ReportWriter, "\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
12
common.go
@@ -7,14 +7,20 @@ import (
|
||||
"github.com/containers/image/types"
|
||||
)
|
||||
|
||||
func getCopyOptions(reportWriter io.Writer) *cp.Options {
|
||||
func getCopyOptions(reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options {
|
||||
return &cp.Options{
|
||||
ReportWriter: reportWriter,
|
||||
ReportWriter: reportWriter,
|
||||
SourceCtx: sourceSystemContext,
|
||||
DestinationCtx: destinationSystemContext,
|
||||
ForceManifestMIMEType: manifestType,
|
||||
}
|
||||
}
|
||||
|
||||
func getSystemContext(signaturePolicyPath string) *types.SystemContext {
|
||||
func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string) *types.SystemContext {
|
||||
sc := &types.SystemContext{}
|
||||
if defaults != nil {
|
||||
*sc = *defaults
|
||||
}
|
||||
if signaturePolicyPath != "" {
|
||||
sc.SignaturePolicyPath = signaturePolicyPath
|
||||
}
|
||||
|
||||
67
config.go
@@ -2,7 +2,6 @@ package buildah
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
@@ -21,8 +20,9 @@ func makeOCIv1Image(dimage *docker.V2Image) (ociv1.Image, error) {
|
||||
if config == nil {
|
||||
config = &dimage.ContainerConfig
|
||||
}
|
||||
dcreated := dimage.Created.UTC()
|
||||
image := ociv1.Image{
|
||||
Created: dimage.Created.UTC(),
|
||||
Created: &dcreated,
|
||||
Author: dimage.Author,
|
||||
Architecture: dimage.Architecture,
|
||||
OS: dimage.OS,
|
||||
@@ -38,7 +38,7 @@ func makeOCIv1Image(dimage *docker.V2Image) (ociv1.Image, error) {
|
||||
},
|
||||
RootFS: ociv1.RootFS{
|
||||
Type: "",
|
||||
DiffIDs: []string{},
|
||||
DiffIDs: []digest.Digest{},
|
||||
},
|
||||
History: []ociv1.History{},
|
||||
}
|
||||
@@ -51,13 +51,12 @@ func makeOCIv1Image(dimage *docker.V2Image) (ociv1.Image, error) {
|
||||
}
|
||||
if RootFS.Type == docker.TypeLayers {
|
||||
image.RootFS.Type = docker.TypeLayers
|
||||
for _, id := range RootFS.DiffIDs {
|
||||
image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, id.String())
|
||||
}
|
||||
image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, RootFS.DiffIDs...)
|
||||
}
|
||||
for _, history := range dimage.History {
|
||||
hcreated := history.Created.UTC()
|
||||
ohistory := ociv1.History{
|
||||
Created: history.Created.UTC(),
|
||||
Created: &hcreated,
|
||||
CreatedBy: history.CreatedBy,
|
||||
Author: history.Author,
|
||||
Comment: history.Comment,
|
||||
@@ -98,13 +97,7 @@ func makeDockerV2S2Image(oimage *ociv1.Image) (docker.V2Image, error) {
|
||||
}
|
||||
if oimage.RootFS.Type == docker.TypeLayers {
|
||||
image.RootFS.Type = docker.TypeLayers
|
||||
for _, id := range oimage.RootFS.DiffIDs {
|
||||
d, err := digest.Parse(id)
|
||||
if err != nil {
|
||||
return docker.V2Image{}, err
|
||||
}
|
||||
image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, d)
|
||||
}
|
||||
image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, oimage.RootFS.DiffIDs...)
|
||||
}
|
||||
for _, history := range oimage.History {
|
||||
dhistory := docker.V2S2History{
|
||||
@@ -145,23 +138,30 @@ func makeDockerV2S1Image(manifest docker.V2S1Manifest) (docker.V2Image, error) {
|
||||
}
|
||||
// Build a filesystem history.
|
||||
history := []docker.V2S2History{}
|
||||
lastID := ""
|
||||
for i := range manifest.History {
|
||||
h := docker.V2S2History{
|
||||
Created: time.Now().UTC(),
|
||||
Author: "",
|
||||
CreatedBy: "",
|
||||
Comment: "",
|
||||
EmptyLayer: false,
|
||||
}
|
||||
// Decode the compatibility field.
|
||||
dcompat := docker.V1Compatibility{}
|
||||
if err2 := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), &dcompat); err2 == nil {
|
||||
h.Created = dcompat.Created.UTC()
|
||||
h.Author = dcompat.Author
|
||||
h.Comment = dcompat.Comment
|
||||
if len(dcompat.ContainerConfig.Cmd) > 0 {
|
||||
h.CreatedBy = fmt.Sprintf("%v", dcompat.ContainerConfig.Cmd)
|
||||
}
|
||||
h.EmptyLayer = dcompat.ThrowAway
|
||||
if err = json.Unmarshal([]byte(manifest.History[i].V1Compatibility), &dcompat); err != nil {
|
||||
return docker.V2Image{}, errors.Errorf("error parsing image compatibility data (%q) from history", manifest.History[i].V1Compatibility)
|
||||
}
|
||||
// Skip this history item if it shares the ID of the last one
|
||||
// that we saw, since the image library will do the same.
|
||||
if i > 0 && dcompat.ID == lastID {
|
||||
continue
|
||||
}
|
||||
lastID = dcompat.ID
|
||||
// Construct a new history item using the recovered information.
|
||||
createdBy := ""
|
||||
if len(dcompat.ContainerConfig.Cmd) > 0 {
|
||||
createdBy = strings.Join(dcompat.ContainerConfig.Cmd, " ")
|
||||
}
|
||||
h := docker.V2S2History{
|
||||
Created: dcompat.Created.UTC(),
|
||||
Author: dcompat.Author,
|
||||
CreatedBy: createdBy,
|
||||
Comment: dcompat.Comment,
|
||||
EmptyLayer: dcompat.ThrowAway,
|
||||
}
|
||||
// Prepend this layer to the list, because a v2s1 format manifest's list is in reverse order
|
||||
// compared to v2s2, which lists earlier layers before later ones.
|
||||
@@ -224,8 +224,8 @@ func (b *Builder) fixupConfig() {
|
||||
if b.Docker.Created.IsZero() {
|
||||
b.Docker.Created = now
|
||||
}
|
||||
if b.OCIv1.Created.IsZero() {
|
||||
b.OCIv1.Created = now
|
||||
if b.OCIv1.Created == nil || b.OCIv1.Created.IsZero() {
|
||||
b.OCIv1.Created = &now
|
||||
}
|
||||
if b.OS() == "" {
|
||||
b.SetOS(runtime.GOOS)
|
||||
@@ -559,3 +559,8 @@ func (b *Builder) Domainname() string {
|
||||
func (b *Builder) SetDomainname(name string) {
|
||||
b.Docker.Config.Domainname = name
|
||||
}
|
||||
|
||||
// SetDefaultMountsFilePath sets the mounts file path for testing purposes
|
||||
func (b *Builder) SetDefaultMountsFilePath(path string) {
|
||||
b.DefaultMountsFilePath = path
|
||||
}
|
||||
|
||||
@@ -168,6 +168,7 @@ return 1
|
||||
--runroot
|
||||
--storage-driver
|
||||
--storage-opt
|
||||
--default-mounts-file
|
||||
"
|
||||
|
||||
case "$prev" in
|
||||
@@ -209,6 +210,10 @@ return 1
|
||||
|
||||
_buildah_rmi() {
|
||||
local boolean_options="
|
||||
--all
|
||||
-a
|
||||
--force
|
||||
-f
|
||||
--help
|
||||
-h
|
||||
"
|
||||
@@ -225,6 +230,8 @@ return 1
|
||||
|
||||
_buildah_rm() {
|
||||
local boolean_options="
|
||||
--all
|
||||
-a
|
||||
--help
|
||||
-h
|
||||
"
|
||||
@@ -290,9 +297,14 @@ return 1
|
||||
-D
|
||||
--quiet
|
||||
-q
|
||||
--rm
|
||||
--tls-verify
|
||||
"
|
||||
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--cert-dir
|
||||
--creds
|
||||
--signature-policy
|
||||
--format
|
||||
-f
|
||||
@@ -336,19 +348,24 @@ return 1
|
||||
--pull-always
|
||||
--quiet
|
||||
-q
|
||||
--tls-verify
|
||||
"
|
||||
|
||||
local options_with_args="
|
||||
--registry
|
||||
--signature-policy
|
||||
--authfile
|
||||
--build-arg
|
||||
--cert-dir
|
||||
--creds
|
||||
-f
|
||||
--file
|
||||
--format
|
||||
--label
|
||||
--runtime
|
||||
--runtime-flag
|
||||
--tag
|
||||
--security-opt
|
||||
--signature-policy
|
||||
-t
|
||||
--file
|
||||
-f
|
||||
--build-arg
|
||||
--format
|
||||
--tag
|
||||
"
|
||||
|
||||
local all_options="$options_with_args $boolean_options"
|
||||
@@ -376,12 +393,15 @@ return 1
|
||||
_buildah_run() {
|
||||
local boolean_options="
|
||||
--help
|
||||
--tty
|
||||
-h
|
||||
"
|
||||
|
||||
local options_with_args="
|
||||
--hostname
|
||||
--runtime
|
||||
--runtime-flag
|
||||
--security-opt
|
||||
--volume
|
||||
-v
|
||||
"
|
||||
@@ -470,9 +490,15 @@ return 1
|
||||
-D
|
||||
--quiet
|
||||
-q
|
||||
--tls-verify
|
||||
"
|
||||
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--cert-dir
|
||||
--creds
|
||||
--format
|
||||
-f
|
||||
--signature-policy
|
||||
"
|
||||
|
||||
@@ -529,14 +555,20 @@ return 1
|
||||
local boolean_options="
|
||||
--help
|
||||
-h
|
||||
--json
|
||||
--quiet
|
||||
-q
|
||||
--noheading
|
||||
-n
|
||||
--notruncate
|
||||
-a
|
||||
--all
|
||||
"
|
||||
|
||||
local options_with_args="
|
||||
--filter
|
||||
-f
|
||||
--format
|
||||
"
|
||||
|
||||
local all_options="$options_with_args $boolean_options"
|
||||
@@ -552,6 +584,7 @@ return 1
|
||||
local boolean_options="
|
||||
--help
|
||||
-h
|
||||
--json
|
||||
--quiet
|
||||
-q
|
||||
--noheading
|
||||
@@ -609,11 +642,14 @@ return 1
|
||||
--pull-always
|
||||
--quiet
|
||||
-q
|
||||
--tls-verify
|
||||
"
|
||||
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--cert-dir
|
||||
--creds
|
||||
--name
|
||||
--registry
|
||||
--signature-policy
|
||||
"
|
||||
|
||||
@@ -628,6 +664,16 @@ return 1
|
||||
esac
|
||||
}
|
||||
|
||||
_buildah_version() {
|
||||
local boolean_options="
|
||||
--help
|
||||
-h
|
||||
"
|
||||
|
||||
local options_with_args="
|
||||
"
|
||||
}
|
||||
|
||||
_buildah() {
|
||||
local previous_extglob_setting=$(shopt -p extglob)
|
||||
shopt -s extglob
|
||||
@@ -651,6 +697,7 @@ return 1
|
||||
tag
|
||||
umount
|
||||
unmount
|
||||
version
|
||||
)
|
||||
|
||||
# These options are valid as global options for all client commands
|
||||
|
||||
@@ -21,11 +21,11 @@
|
||||
# https://github.com/projectatomic/buildah
|
||||
%global provider_prefix %{provider}.%{provider_tld}/%{project}/%{repo}
|
||||
%global import_path %{provider_prefix}
|
||||
%global commit a0a5333b94264d1fb1e072d63bcb98f9e2981b49
|
||||
%global commit REPLACEWITHCOMMITID
|
||||
%global shortcommit %(c=%{commit}; echo ${c:0:7})
|
||||
|
||||
Name: buildah
|
||||
Version: 0.1
|
||||
Version: 0.11
|
||||
Release: 1.git%{shortcommit}%{?dist}
|
||||
Summary:        A command line tool for creating OCI Images
|
||||
License: ASL 2.0
|
||||
@@ -41,7 +41,11 @@ BuildRequires: gpgme-devel
|
||||
BuildRequires: device-mapper-devel
|
||||
BuildRequires: btrfs-progs-devel
|
||||
BuildRequires: libassuan-devel
|
||||
BuildRequires: glib2-devel
|
||||
BuildRequires: ostree-devel
|
||||
BuildRequires: make
|
||||
Requires: runc >= 1.0.0-6
|
||||
Requires: container-selinux
|
||||
Requires: skopeo-containers
|
||||
Provides: %{repo} = %{version}-%{release}
|
||||
|
||||
@@ -67,7 +71,7 @@ popd
|
||||
mv vendor src
|
||||
|
||||
export GOPATH=$(pwd)/_build:$(pwd):%{gopath}
|
||||
make all
|
||||
make all GIT_COMMIT=%{shortcommit}
|
||||
|
||||
%install
|
||||
export GOPATH=$(pwd)/_build:$(pwd):%{gopath}
|
||||
@@ -85,5 +89,130 @@ make DESTDIR=%{buildroot} PREFIX=%{_prefix} install install.completions
|
||||
%{_datadir}/bash-completion/completions/*
|
||||
|
||||
%changelog
|
||||
* Mon Feb 12 2018 Dan Walsh <dwalsh@redhat.com> 0.12-1
|
||||
- Added handling for simpler error messages for unknown Dockerfile instructions.
|
||||
- Change default certs directory to /etc/containers/certs.dir
|
||||
- Vendor in latest containers/image
|
||||
- Vendor in latest containers/storage
|
||||
- build-using-dockerfile: set the 'author' field for MAINTAINER
|
||||
- Return exit code 1 when buildah-rmi fails
|
||||
- Trim the image reference to just its name before calling getImageName
|
||||
- Touch up rmi -f usage statement
|
||||
- Add --format and --filter to buildah containers
|
||||
- Add --prune,-p option to rmi command
|
||||
- Add authfile param to commit
|
||||
- Fix --runtime-flag for buildah run and bud
|
||||
- format should override quiet for images
|
||||
- Allow all auth params to work with bud
|
||||
- Do not overwrite directory permissions on --chown
|
||||
- Unescape HTML characters output into the terminal
|
||||
- Fix: setting the container name to the image
|
||||
- Prompt for un/pwd if not supplied with --creds
|
||||
- Make bud be really quiet
|
||||
- Return a better error message when failed to resolve an image
|
||||
- Update auth tests and fix bud man page
|
||||
|
||||
* Tue Jan 16 2018 Dan Walsh <dwalsh@redhat.com> 0.11-1
|
||||
- Add --all to remove containers
|
||||
- Add --all functionality to rmi
|
||||
- Show ctrid when doing rm -all
|
||||
- Ignore sequential duplicate layers when reading v2s1
|
||||
- Lots of minor bug fixes
|
||||
- Vendor in latest containers/image and containers/storage
|
||||
|
||||
* Sat Dec 23 2017 Dan Walsh <dwalsh@redhat.com> 0.10-1
|
||||
- Display Config and Manifest as strings
|
||||
- Bump containers/image
|
||||
- Use configured registries to resolve image names
|
||||
- Update to work with newer image library
|
||||
- Add --chown option to add/copy commands
|
||||
|
||||
* Sat Dec 2 2017 Dan Walsh <dwalsh@redhat.com> 0.9-1
|
||||
- Allow push to use the image id
|
||||
- Make sure builtin volumes have the correct label
|
||||
|
||||
* Thu Nov 16 2017 Dan Walsh <dwalsh@redhat.com> 0.8-1
|
||||
- Buildah bud was failing on SELinux machines, this fixes this
|
||||
- Block access to certain kernel file systems inside of the container
|
||||
|
||||
* Thu Nov 16 2017 Dan Walsh <dwalsh@redhat.com> 0.7-1
|
||||
- Ignore errors when trying to read containers buildah.json for loading SELinux reservations
|
||||
- Use credentials from kpod login for buildah
|
||||
|
||||
* Wed Nov 15 2017 Dan Walsh <dwalsh@redhat.com> 0.6-1
|
||||
- Adds support for converting manifest types when using the dir transport
|
||||
- Rework how we do UID resolution in images
|
||||
- Bump github.com/vbatts/tar-split
|
||||
- Set option.terminal appropriately in run
|
||||
|
||||
* Wed Nov 08 2017 Dan Walsh <dwalsh@redhat.com> 0.5-2
|
||||
- Bump github.com/vbatts/tar-split
|
||||
- Fixes CVE That could allow a container image to cause a DOS
|
||||
|
||||
* Tue Nov 07 2017 Dan Walsh <dwalsh@redhat.com> 0.5-1
|
||||
- Add secrets patch to buildah
|
||||
- Add proper SELinux labeling to buildah run
|
||||
- Add tls-verify to bud command
|
||||
- Make filtering by date use the image's date
|
||||
- images: don't list unnamed images twice
|
||||
- Fix timeout issue
|
||||
- Add further tty verbiage to buildah run
|
||||
- Make inspect try an image on failure if type not specified
|
||||
- Add support for `buildah run --hostname`
|
||||
- Tons of bug fixes and code cleanup
|
||||
|
||||
* Fri Sep 22 2017 Dan Walsh <dwalsh@redhat.com> 0.4-1.git9cbccf88c
|
||||
- Add default transport to push if not provided
|
||||
- Avoid trying to print a nil ImageReference
|
||||
- Add authentication to commit and push
|
||||
- Add information on buildah from man page on transports
|
||||
- Remove --transport flag
|
||||
- Run: do not complain about missing volume locations
|
||||
- Add credentials to buildah from
|
||||
- Remove export command
|
||||
- Run(): create the right working directory
|
||||
- Improve "from" behavior with unnamed references
|
||||
- Avoid parsing image metadata for dates and layers
|
||||
- Read the image's creation date from public API
|
||||
- Bump containers/storage and containers/image
|
||||
- Don't panic if an image's ID can't be parsed
|
||||
- Turn on --enable-gc when running gometalinter
|
||||
- rmi: handle truncated image IDs
|
||||
|
||||
* Tue Aug 15 2017 Josh Boyer <jwboyer@redhat.com> - 0.3-5.gitb9b2a8a
|
||||
- Build for s390x as well
|
||||
|
||||
* Wed Aug 02 2017 Fedora Release Engineering <releng@fedoraproject.org> - 0.3-4.gitb9b2a8a
|
||||
- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild
|
||||
|
||||
* Wed Jul 26 2017 Fedora Release Engineering <releng@fedoraproject.org> - 0.3-3.gitb9b2a8a
|
||||
- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild
|
||||
|
||||
* Thu Jul 20 2017 Dan Walsh <dwalsh@redhat.com> 0.3-2.gitb9b2a8a7e
|
||||
- Bump for inclusion of OCI 1.0 Runtime and Image Spec
|
||||
|
||||
* Tue Jul 18 2017 Dan Walsh <dwalsh@redhat.com> 0.2.0-1.gitac2aad6
|
||||
- buildah run: Add support for -- ending options parsing
|
||||
- buildah Add/Copy support for glob syntax
|
||||
- buildah commit: Add flag to remove containers on commit
|
||||
- buildah push: Improve man page and help information
|
||||
- buildah run: add a way to disable PTY allocation
|
||||
- Buildah docs: clarify --runtime-flag of run command
|
||||
- Update to match newer storage and image-spec APIs
|
||||
- Update containers/storage and containers/image versions
|
||||
- buildah export: add support
|
||||
- buildah images: update commands
|
||||
- buildah images: Add JSON output option
|
||||
- buildah rmi: update commands
|
||||
- buildah containers: Add JSON output option
|
||||
- buildah version: add command
|
||||
- buildah run: Handle run without an explicit command correctly
|
||||
- Ensure volume points get created, and with perms
|
||||
- buildah containers: Add a -a/--all option
|
||||
|
||||
* Wed Jun 14 2017 Dan Walsh <dwalsh@redhat.com> 0.1.0-2.git597d2ab9
|
||||
- Release Candidate 1
|
||||
- All features have now been implemented.
|
||||
|
||||
* Fri Apr 14 2017 Dan Walsh <dwalsh@redhat.com> 0.0.1-1.git7a0a5333
|
||||
- First package for Fedora
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package buildah
|
||||
|
||||
import (
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -13,5 +14,5 @@ func (b *Builder) Delete() error {
|
||||
b.MountPoint = ""
|
||||
b.Container = ""
|
||||
b.ContainerID = ""
|
||||
return nil
|
||||
return label.ReleaseLabel(b.ProcessLabel)
|
||||
}
|
||||
|
||||
@@ -13,10 +13,18 @@ appears to be an archive, its contents are extracted and added instead of the
|
||||
archive file itself. If a local directory is specified as a source, its
|
||||
*contents* are copied to the destination.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--chown** *owner*:*group*
|
||||
|
||||
Sets the user and group ownership of the destination content.
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah add containerID '/myapp/app.conf' '/myapp/app.conf'
|
||||
|
||||
buildah add --chown myuser:mygroup containerID '/myapp/app.conf' '/myapp/app.conf'
|
||||
|
||||
buildah add containerID '/home/myuser/myproject.go'
|
||||
|
||||
buildah add containerID '/home/myuser/myfiles.tar' '/tmp'
|
||||
|
||||
@@ -13,6 +13,28 @@ build context directory. The build context directory can be specified as the
|
||||
to a temporary location.
|
||||
|
||||
## OPTIONS
|
||||
**--authfile** *path*
|
||||
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--build-arg** *arg=value*
|
||||
|
||||
Specifies a build argument and its value, which will be interpolated in
|
||||
instructions read from the Dockerfiles in the same way that environment
|
||||
variables are, but which will not be added to environment variable list in the
|
||||
resulting image's configuration.
|
||||
|
||||
**--cert-dir** *path*
|
||||
|
||||
Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry.
|
||||
Default certificates directory is _/etc/containers/certs.d_.
|
||||
|
||||
**--creds** *creds*
|
||||
|
||||
The [username[:password]] to use to authenticate with the registry if required.
|
||||
If one or both values are not supplied, a command line prompt will appear and the
|
||||
value can be entered. The password is entered without echo.
|
||||
|
||||
**-f, --file** *Dockerfile*
|
||||
|
||||
@@ -25,6 +47,12 @@ If a build context is not specified, and at least one Dockerfile is a
|
||||
local file, the directory in which it resides will be used as the build
|
||||
context.
|
||||
|
||||
**--format**
|
||||
|
||||
Control the format for the built image's manifest and configuration data.
|
||||
Recognized formats include *oci* (OCI image-spec v1.0, the default) and
|
||||
*docker* (version 2, using schema format 2 for the manifest).
|
||||
|
||||
**--pull**
|
||||
|
||||
Pull the image if it is not present. If this flag is disabled (with
|
||||
@@ -35,23 +63,11 @@ Defaults to *true*.
|
||||
|
||||
Pull the image even if a version of the image is already present.
|
||||
|
||||
**--registry** *registry*
|
||||
**-q, --quiet**
|
||||
|
||||
A prefix to prepend to the image name in order to pull the image. Default
|
||||
value is "docker://"
|
||||
|
||||
**--signature-policy** *signaturepolicy*
|
||||
|
||||
Pathname of a signature policy file to use. It is not recommended that this
|
||||
option be used, as the default behavior of using the system-wide default policy
|
||||
(frequently */etc/containers/policy.json*) is most often preferred.
|
||||
|
||||
**--build-arg** *arg=value*
|
||||
|
||||
Specifies a build argument and its value, which will be interpolated in
|
||||
instructions read from the Dockerfiles in the same way that environment
|
||||
variables are, but which will not be added to environment variable list in the
|
||||
resulting image's configuration.
|
||||
Suppress output messages which indicate which instruction is being processed,
|
||||
and of progress when pulling images from a registry, and when writing the
|
||||
output image.
|
||||
|
||||
**--runtime** *path*
|
||||
|
||||
@@ -60,24 +76,26 @@ commands specified by the **RUN** instruction.
|
||||
|
||||
**--runtime-flag** *flag*
|
||||
|
||||
Adds global flags for the container runtime.
|
||||
Adds global flags for the container runtime. To list the supported flags, please
|
||||
consult manpages of your selected container runtime (`runc` is the default
|
||||
runtime, the manpage to consult is `runc(8)`).
|
||||
Note: Do not pass the leading `--` to the flag. To pass the runc flag `--log-format json`
|
||||
to buildah bud, the option given would be `--runtime-flag log-format=json`.
|
||||
|
||||
**--signature-policy** *signaturepolicy*
|
||||
|
||||
Pathname of a signature policy file to use. It is not recommended that this
|
||||
option be used, as the default behavior of using the system-wide default policy
|
||||
(frequently */etc/containers/policy.json*) is most often preferred.
|
||||
|
||||
**-t, --tag** *imageName*
|
||||
|
||||
Specifies the name which will be assigned to the resulting image if the build
|
||||
process completes successfully.
|
||||
|
||||
**--format**
|
||||
**--tls-verify** *bool-value*
|
||||
|
||||
Control the format for the built image's manifest and configuration data.
|
||||
Recognized formats include *oci* (OCI image-spec v1.0, the default) and
|
||||
*docker* (version 2, using schema format 2 for the manifest).
|
||||
|
||||
**--quiet**
|
||||
|
||||
Suppress output messages which indicate which instruction is being processed,
|
||||
and of progress when pulling images from a registry, and when writing the
|
||||
output image.
|
||||
Require HTTPS and verify certificates when talking to container registries (defaults to true)
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
@@ -89,7 +107,15 @@ buildah bud -f Dockerfile.simple -f Dockerfile.notsosimple
|
||||
|
||||
buildah bud -t imageName .
|
||||
|
||||
buildah bud -t imageName -f Dockerfile.simple
|
||||
buildah bud --tls-verify=true -t imageName -f Dockerfile.simple
|
||||
|
||||
buildah bud --tls-verify=false -t imageName .
|
||||
|
||||
buildah bud --runtime-flag log-format=json .
|
||||
|
||||
buildah bud --runtime-flag debug .
|
||||
|
||||
buildah bud --authfile /tmp/auths/myauths.json --cert-dir ~/auth --tls-verify=true --creds=username:password -t imageName -f Dockerfile.simple
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1)
|
||||
buildah(1), podman-login(1), docker-login(1)
|
||||
|
||||
@@ -13,19 +13,26 @@ specified, an ID is assigned, but no name is assigned to the image.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--authfile** *path*
|
||||
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--cert-dir** *path*
|
||||
|
||||
Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry.
|
||||
Default certificates directory is _/etc/containers/certs.d_.
|
||||
|
||||
**--creds** *creds*
|
||||
|
||||
The [username[:password]] to use to authenticate with the registry if required.
|
||||
If one or both values are not supplied, a command line prompt will appear and the
|
||||
value can be entered. The password is entered without echo.
|
||||
|
||||
**--disable-compression, -D**
|
||||
|
||||
Don't compress filesystem layers when building the image.
|
||||
|
||||
**--signature-policy**
|
||||
|
||||
Pathname of a signature policy file to use. It is not recommended that this
|
||||
option be used, as the default behavior of using the system-wide default policy
|
||||
(frequently */etc/containers/policy.json*) is most often preferred.
|
||||
|
||||
**--quiet**
|
||||
|
||||
When writing the output image, suppress progress output.
|
||||
|
||||
**--format**
|
||||
|
||||
@@ -33,15 +40,47 @@ Control the format for the image manifest and configuration data. Recognized
|
||||
formats include *oci* (OCI image-spec v1.0, the default) and *docker* (version
|
||||
2, using schema format 2 for the manifest).
|
||||
|
||||
**--quiet**
|
||||
|
||||
When writing the output image, suppress progress output.
|
||||
|
||||
**--rm**
|
||||
Remove the container and its content after committing it to an image.
|
||||
Default leaves the container and its content in place.
|
||||
|
||||
**--signature-policy**
|
||||
|
||||
Pathname of a signature policy file to use. It is not recommended that this
|
||||
option be used, as the default behavior of using the system-wide default policy
|
||||
(frequently */etc/containers/policy.json*) is most often preferred.
|
||||
|
||||
**--tls-verify** *bool-value*
|
||||
|
||||
Require HTTPS and verify certificates when talking to container registries (defaults to true)
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah commit containerID
|
||||
This example saves an image based on the container.
|
||||
`buildah commit containerID`
|
||||
|
||||
buildah commit containerID newImageName
|
||||
This example saves an image named newImageName based on the container.
|
||||
`buildah commit --rm containerID newImageName`
|
||||
|
||||
buildah commit --disable-compression --signature-policy '/etc/containers/policy.json' containerID
|
||||
|
||||
buildah commit --disable-compression --signature-policy '/etc/containers/policy.json' containerID newImageName
|
||||
This example saves an image based on the container disabling compression.
|
||||
`buildah commit --disable-compression containerID`
|
||||
|
||||
This example saves an image named newImageName based on the container disabling compression.
|
||||
`buildah commit --disable-compression containerID newImageName`
|
||||
|
||||
This example commits the container to the image on the local registry while turning off tls verification.
|
||||
`buildah commit --tls-verify=false containerID docker://localhost:5000/imageId`
|
||||
|
||||
This example commits the container to the image on the local registry using credentials and certificates for authentication.
|
||||
`buildah commit --cert-dir ~/auth --tls-verify=true --creds=username:password containerID docker://localhost:5000/imageId`
|
||||
|
||||
This example commits the container to the image on the local registry using credentials from the /tmp/auths/myauths.json file and certificates for authentication.
|
||||
`buildah commit --authfile /tmp/auths/myauths.json --cert-dir ~/auth --tls-verify=true --creds=username:password containerID docker://localhost:5000/imageId`
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1)
|
||||
|
||||
@@ -7,11 +7,47 @@ buildah containers - List the working containers and their base images.
|
||||
**buildah** **containers** [*options* [...]]
|
||||
|
||||
## DESCRIPTION
|
||||
Lists containers which appear to be buildah working containers, their names and
|
||||
Lists containers which appear to be Buildah working containers, their names and
|
||||
IDs, and the names and IDs of the images from which they were initialized.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--all, -a**
|
||||
|
||||
List information about all containers, including those which were not created
|
||||
by and are not being used by Buildah. Containers created by Buildah are
|
||||
denoted with an '*' in the 'BUILDER' column.
|
||||
|
||||
**--filter, -f**
|
||||
|
||||
Filter output based on conditions provided.
|
||||
|
||||
Valid filters are listed below:
|
||||
|
||||
| **Filter** | **Description** |
|
||||
| --------------- | ------------------------------------------------------------------- |
|
||||
| id | [ID] Container's ID |
|
||||
| name | [Name] Container's name |
|
||||
| ancestor | [ImageName] Image or descendant used to create container |
|
||||
|
||||
**--format**
|
||||
|
||||
Pretty-print containers using a Go template.
|
||||
|
||||
Valid placeholders for the Go template are listed below:
|
||||
|
||||
| **Placeholder** | **Description** |
|
||||
| --------------- | -----------------------------------------|
|
||||
| .ContainerID | Container ID |
|
||||
| .Builder | Whether container was created by buildah |
|
||||
| .ImageID | Image ID |
|
||||
| .ImageName | Image name |
|
||||
| .ContainerName | Container name |
|
||||
|
||||
**--json**
|
||||
|
||||
Output in JSON format.
|
||||
|
||||
**--noheading, -n**
|
||||
|
||||
Omit the table headings from the listing of containers.
|
||||
@@ -27,10 +63,55 @@ Displays only the container IDs.
|
||||
## EXAMPLE
|
||||
|
||||
buildah containers
|
||||
```
|
||||
CONTAINER ID BUILDER IMAGE ID IMAGE NAME CONTAINER NAME
|
||||
29bdb522fc62 * 3fd9065eaf02 docker.io/library/alpine:latest alpine-working-container
|
||||
c6b04237ac8e * f9b6f7f7b9d3 docker.io/library/busybox:latest busybox-working-container
|
||||
```
|
||||
|
||||
buildah containers --quiet
|
||||
```
|
||||
29bdb522fc62d43fca0c1a0f11cfc6dfcfed169cf6cf25f928ebca1a612ff5b0
|
||||
c6b04237ac8e9d435ec9cf0e7eda91e302f2db9ef908418522c2d666352281eb
|
||||
```
|
||||
|
||||
buildah containers -q --noheading --notruncate
|
||||
```
|
||||
29bdb522fc62d43fca0c1a0f11cfc6dfcfed169cf6cf25f928ebca1a612ff5b0
|
||||
c6b04237ac8e9d435ec9cf0e7eda91e302f2db9ef908418522c2d666352281eb
|
||||
```
|
||||
|
||||
buildah containers --json
|
||||
```
|
||||
[
|
||||
{
|
||||
"id": "29bdb522fc62d43fca0c1a0f11cfc6dfcfed169cf6cf25f928ebca1a612ff5b0",
|
||||
"builder": true,
|
||||
"imageid": "3fd9065eaf02feaf94d68376da52541925650b81698c53c6824d92ff63f98353",
|
||||
"imagename": "docker.io/library/alpine:latest",
|
||||
"containername": "alpine-working-container"
|
||||
},
|
||||
{
|
||||
"id": "c6b04237ac8e9d435ec9cf0e7eda91e302f2db9ef908418522c2d666352281eb",
|
||||
"builder": true,
|
||||
"imageid": "f9b6f7f7b9d34113f66e16a9da3e921a580937aec98da344b852ca540aaa2242",
|
||||
"imagename": "docker.io/library/busybox:latest",
|
||||
"containername": "busybox-working-container"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
buildah containers --format "{{.ContainerID}} {{.ContainerName}}"
|
||||
```
|
||||
3fbeaa87e583ee7a3e6787b2d3af961ef21946a0c01a08938e4f52d53cce4c04 myalpine-working-container
|
||||
fbfd3505376ee639c3ed50f9d32b78445cd59198a1dfcacf2e7958cda2516d5c ubuntu-working-container
|
||||
```
|
||||
|
||||
buildah containers --filter ancestor=ubuntu
|
||||
```
|
||||
CONTAINER ID BUILDER IMAGE ID IMAGE NAME CONTAINER NAME
|
||||
fbfd3505376e * 0ff04b2e7b63 docker.io/library/ubuntu:latest ubuntu-working-container
|
||||
```
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1)
|
||||
|
||||
@@ -11,10 +11,18 @@ Copies the contents of a file, URL, or a directory to a container's working
|
||||
directory or a specified location in the container. If a local directory is
|
||||
specified as a source, its *contents* are copied to the destination.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--chown** *owner*:*group*
|
||||
|
||||
Sets the user and group ownership of the destination content.
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah copy containerID '/myapp/app.conf' '/myapp/app.conf'
|
||||
|
||||
buildah copy --chown myuser:mygroup containerID '/myapp/app.conf' '/myapp/app.conf'
|
||||
|
||||
buildah copy containerID '/home/myuser/myproject.go'
|
||||
|
||||
buildah copy containerID '/home/myuser/myfiles.tar' '/tmp'
|
||||
|
||||
@@ -8,13 +8,50 @@ buildah from - Creates a new working container, either from scratch or using a s
|
||||
|
||||
## DESCRIPTION
|
||||
Creates a working container based upon the specified image name. If the
|
||||
supplied image name is "scratch" a new empty container is created.
|
||||
supplied image name is "scratch" a new empty container is created. Image names
|
||||
use a "transport":"details" format.
|
||||
|
||||
Multiple transports are supported:
|
||||
|
||||
**dir:**_path_
|
||||
An existing local directory _path_ retrieving the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.
|
||||
|
||||
**docker://**_docker-reference_ (Default)
|
||||
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(podman login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
|
||||
|
||||
**docker-archive:**_path_
|
||||
An image is retrieved as a `docker load` formatted file.
|
||||
|
||||
**docker-daemon:**_docker-reference_
|
||||
An image _docker-reference_ stored in the docker daemon internal storage. _docker-reference_ must contain either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID).
|
||||
|
||||
**oci:**_path_**:**_tag_
|
||||
An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_.
|
||||
|
||||
**ostree:**_image_[**@**_/absolute/repo/path_]
|
||||
An image in local OSTree repository. _/absolute/repo/path_ defaults to _/ostree/repo_.
|
||||
|
||||
## RETURN VALUE
|
||||
The container ID of the container that was created. On error, -1 is returned and errno is set.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--authfile** *path*
|
||||
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--cert-dir** *path*
|
||||
|
||||
Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry.
|
||||
Default certificates directory is _/etc/containers/certs.d_.
|
||||
|
||||
**--creds** *creds*
|
||||
|
||||
The [username[:password]] to use to authenticate with the registry if required.
|
||||
If one or both values are not supplied, a command line prompt will appear and the
|
||||
value can be entered. The password is entered without echo.
|
||||
|
||||
**--name** *name*
|
||||
|
||||
A *name* for the working container
|
||||
@@ -29,10 +66,9 @@ Defaults to *true*.
|
||||
|
||||
Pull the image even if a version of the image is already present.
|
||||
|
||||
**--registry** *registry*
|
||||
**--quiet**
|
||||
|
||||
A prefix to prepend to the image name in order to pull the image. Default
|
||||
value is "docker://"
|
||||
If an image needs to be pulled from the registry, suppress progress output.
|
||||
|
||||
**--signature-policy** *signaturepolicy*
|
||||
|
||||
@@ -40,19 +76,25 @@ Pathname of a signature policy file to use. It is not recommended that this
|
||||
option be used, as the default behavior of using the system-wide default policy
|
||||
(frequently */etc/containers/policy.json*) is most often preferred.
|
||||
|
||||
**--quiet**
|
||||
**--tls-verify** *bool-value*
|
||||
|
||||
If an image needs to be pulled from the registry, suppress progress output.
|
||||
Require HTTPS and verify certificates when talking to container registries (defaults to true)
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah from imagename --pull --registry "myregistry://"
|
||||
buildah from imagename --pull
|
||||
|
||||
buildah from myregistry://imagename --pull
|
||||
buildah from docker://myregistry.example.com/imagename --pull
|
||||
|
||||
buildah from imagename --signature-policy /etc/containers/policy.json
|
||||
|
||||
buildah from imagename --pull-always --registry "myregistry://" --name "mycontainer"
|
||||
buildah from docker://myregistry.example.com/imagename --pull-always --name "mycontainer"
|
||||
|
||||
buildah from myregistry/myrepository/imagename:imagetag --tls-verify=false
|
||||
|
||||
buildah from myregistry/myrepository/imagename:imagetag --creds=myusername:mypassword --cert-dir ~/auth
|
||||
|
||||
buildah from myregistry/myrepository/imagename:imagetag --authfile=/tmp/auths/myauths.json
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1)
|
||||
buildah(1), podman-login(1), docker-login(1)
|
||||
|
||||
@@ -11,25 +11,46 @@ Displays locally stored images, their names, and their IDs.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--digests**
|
||||
|
||||
Show the image digests.
|
||||
|
||||
**--filter, -f=[]**
|
||||
|
||||
Filter output based on conditions provided (default []). Valid
|
||||
keywords are 'dangling', 'label', 'before' and 'since'.
|
||||
|
||||
**--format="TEMPLATE"**
|
||||
|
||||
Pretty-print images using a Go template.
|
||||
|
||||
**--json**
|
||||
|
||||
Display the output in JSON format.
|
||||
|
||||
**--noheading, -n**
|
||||
|
||||
Omit the table headings from the listing of images.
|
||||
|
||||
**--notruncate**
|
||||
**no-trunc**
|
||||
|
||||
Do not truncate output.
|
||||
|
||||
**--quiet, -q**
|
||||
|
||||
Lists only the image IDs.
|
||||
Displays only the image IDs.
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah images
|
||||
|
||||
buildah images --json
|
||||
|
||||
buildah images --quiet
|
||||
|
||||
buildah images -q --noheading --notruncate
|
||||
|
||||
buildah images --filter dangling=true
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1)
|
||||
|
||||
@@ -7,7 +7,7 @@ buildah inspect - Display information about working containers or images.
|
||||
**buildah** **inspect** [*options* [...] --] **ID**
|
||||
|
||||
## DESCRIPTION
|
||||
Prints the low-level information on buildah object(s) (e.g. container, images) identified by name or ID. By default, this will render all results in a
|
||||
Prints the low-level information on Buildah object(s) (e.g. container, images) identified by name or ID. By default, this will render all results in a
|
||||
JSON array. If the container and image have the same name, this will return container JSON for unspecified type. If a format is specified,
|
||||
the given template will be executed for each result.
|
||||
|
||||
@@ -19,7 +19,7 @@ Use *template* as a Go template when formatting the output.
|
||||
|
||||
Users of this option should be familiar with the [*text/template*
|
||||
package](https://golang.org/pkg/text/template/) in the Go standard library, and
|
||||
of internals of buildah's implementation.
|
||||
of internals of Buildah's implementation.
|
||||
|
||||
**--type** *container* | *image*
|
||||
|
||||
|
||||
@@ -10,29 +10,101 @@ buildah push - Push an image from local storage to elsewhere.
|
||||
Pushes an image from local storage to a specified destination, decompressing
|
||||
and recompressing layers as needed.
|
||||
|
||||
## imageID
|
||||
Image stored in local container/storage
|
||||
|
||||
## DESTINATION
|
||||
|
||||
The DESTINATION is a location to store container images
|
||||
The Image "DESTINATION" uses a "transport":"details" format.
|
||||
|
||||
Multiple transports are supported:
|
||||
|
||||
**dir:**_path_
|
||||
An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.
|
||||
|
||||
**docker://**_docker-reference_
|
||||
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(podman login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
|
||||
|
||||
**docker-archive:**_path_[**:**_docker-reference_]
|
||||
An image is stored in the `docker save` formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest.
|
||||
|
||||
**docker-daemon:**_docker-reference_
|
||||
An image _docker-reference_ stored in the docker daemon internal storage. _docker-reference_ must contain either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID).
|
||||
|
||||
**oci:**_path_**:**_tag_
|
||||
An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_.
|
||||
|
||||
**ostree:**_image_[**@**_/absolute/repo/path_]
|
||||
An image in local OSTree repository. _/absolute/repo/path_ defaults to _/ostree/repo_.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--authfile** *path*
|
||||
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--cert-dir** *path*
|
||||
|
||||
Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry.
|
||||
Default certificates directory is _/etc/containers/certs.d_.
|
||||
|
||||
**--creds** *creds*
|
||||
|
||||
The [username[:password]] to use to authenticate with the registry if required.
|
||||
If one or both values are not supplied, a command line prompt will appear and the
|
||||
value can be entered. The password is entered without echo.
|
||||
|
||||
**--disable-compression, -D**
|
||||
|
||||
Don't compress copies of filesystem layers which will be pushed.
|
||||
|
||||
**--format, -f**
|
||||
|
||||
Manifest Type (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)
|
||||
|
||||
**--quiet**
|
||||
|
||||
When writing the output image, suppress progress output.
|
||||
|
||||
**--signature-policy**
|
||||
|
||||
Pathname of a signature policy file to use. It is not recommended that this
|
||||
option be used, as the default behavior of using the system-wide default policy
|
||||
(frequently */etc/containers/policy.json*) is most often preferred.
|
||||
|
||||
**--quiet**
|
||||
**--tls-verify** *bool-value*
|
||||
|
||||
When writing the output image, suppress progress output.
|
||||
Require HTTPS and verify certificates when talking to container registries (defaults to true)
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah push imageID dir:/path/to/image
|
||||
This example extracts the imageID image to a local directory in docker format.
|
||||
|
||||
buildah push imageID oci-layout:/path/to/layout
|
||||
`# buildah push imageID dir:/path/to/image`
|
||||
|
||||
buildah push imageID docker://registry/repository:tag
|
||||
This example extracts the imageID image to a local directory in oci format.
|
||||
|
||||
`# buildah push imageID oci:/path/to/layout`
|
||||
|
||||
This example extracts the imageID image to a container registry named registry.example.com.
|
||||
|
||||
`# buildah push imageID docker://registry.example.com/repository:tag`
|
||||
|
||||
This example extracts the imageID image to a private container registry named registry.example.com with authentication from /tmp/auths/myauths.json.
|
||||
|
||||
`# buildah push --authfile /tmp/auths/myauths.json imageID docker://registry.example.com/repository:tag`
|
||||
|
||||
This example extracts the imageID image and puts into the local docker container store.
|
||||
|
||||
`# buildah push imageID docker-daemon:image:tag`
|
||||
|
||||
This example extracts the imageID image and puts it into the registry on the localhost while turning off tls verification.
|
||||
`# buildah push --tls-verify=false imageID docker://localhost:5000/my-imageID`
|
||||
|
||||
This example extracts the imageID image and puts it into the registry on the localhost using credentials and certificates for authentication.
|
||||
`# buildah push --cert-dir ~/auth --tls-verify=true --creds=username:password imageID docker://localhost:5000/my-imageID`
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1)
|
||||
buildah(1), podman-login(1), docker-login(1)
|
||||
|
||||
@@ -9,11 +9,19 @@ buildah rm - Removes one or more working containers.
|
||||
## DESCRIPTION
|
||||
Removes one or more working containers, unmounting them if necessary.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--all, -a**
|
||||
|
||||
All Buildah containers will be removed. Buildah containers are denoted with an '*' in the 'BUILDER' column listed by the command 'buildah containers'.
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah rm containerID
|
||||
|
||||
buildah rm containerID1 containerID2 containerID3
|
||||
|
||||
buildah rm --all
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1)
|
||||
|
||||
@@ -9,10 +9,37 @@ buildah rmi - Removes one or more images.
|
||||
## DESCRIPTION
|
||||
Removes one or more locally stored images.
|
||||
|
||||
## LIMITATIONS
|
||||
If the image was pushed to a directory path using the 'dir:' transport
|
||||
the rmi command cannot remove the image. Instead standard file system
|
||||
commands should be used.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--all, -a**
|
||||
|
||||
All local images will be removed from the system that do not have containers using the image as a reference image.
|
||||
|
||||
**--prune, -p**
|
||||
|
||||
All local images will be removed from the system that do not have a tag and do not have a child image pointing to them.
|
||||
|
||||
**--force, -f**
|
||||
|
||||
This option will cause Buildah to remove all containers that are using the image before removing the image from the system.
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah rmi imageID
|
||||
|
||||
buildah rmi --all
|
||||
|
||||
buildah rmi --all --force
|
||||
|
||||
buildah rmi --prune
|
||||
|
||||
buildah rmi --force imageID
|
||||
|
||||
buildah rmi imageID1 imageID2 imageID3
|
||||
|
||||
## SEE ALSO
|
||||
|
||||
@@ -10,9 +10,12 @@ buildah run - Run a command inside of the container.
|
||||
Launches a container and runs the specified command in that container using the
|
||||
container's root filesystem as a root filesystem, using configuration settings
|
||||
inherited from the container's image or as specified using previous calls to
|
||||
the *buildah config* command.
|
||||
the *buildah config* command. If you execute *buildah run* and expect an
|
||||
interactive shell, you need to specify the --tty flag.
|
||||
|
||||
## OPTIONS
|
||||
**--hostname**
|
||||
Set the hostname inside of the running container.
|
||||
|
||||
**--runtime** *path*
|
||||
|
||||
@@ -20,17 +23,40 @@ The *path* to an alternate OCI-compatible runtime.
|
||||
|
||||
**--runtime-flag** *flag*
|
||||
|
||||
Adds global flags for the container rutime.
|
||||
Adds global flags for the container runtime. To list the supported flags, please
|
||||
consult manpages of your selected container runtime (`runc` is the default
|
||||
runtime, the manpage to consult is `runc(8)`).
|
||||
Note: Do not pass the leading `--` to the flag. To pass the runc flag `--log-format json`
|
||||
to buildah run, the option given would be `--runtime-flag log-format=json`.
|
||||
|
||||
**--tty**
|
||||
|
||||
By default a pseudo-TTY is allocated only when buildah's standard input is
|
||||
attached to a pseudo-TTY. Setting the `--tty` option to `true` will cause a
|
||||
pseudo-TTY to be allocated inside the container connecting the user's "terminal"
|
||||
with the stdin and stdout stream of the container. Setting the `--tty` option to
|
||||
`false` will prevent the pseudo-TTY from being allocated.
|
||||
|
||||
**--volume, -v** *source*:*destination*:*flags*
|
||||
|
||||
Bind mount a location from the host into the container for its lifetime.
|
||||
|
||||
NOTE: End parsing of options with the `--` option, so that you can pass other
|
||||
options to the command inside of the container
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah run containerID 'ps -auxw'
|
||||
buildah run containerID -- ps -auxw
|
||||
|
||||
buildah run containerID --runtime-flag --no-new-keyring 'ps -auxw'
|
||||
buildah run containerID --hostname myhost -- ps -auxw
|
||||
|
||||
buildah run --runtime-flag log-format=json containerID /bin/bash
|
||||
|
||||
buildah run --runtime-flag debug containerID /bin/bash
|
||||
|
||||
buildah run --tty containerID /bin/bash
|
||||
|
||||
buildah run --tty=false containerID ls /
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1)
|
||||
|
||||
27
docs/buildah-version.md
Normal file
@@ -0,0 +1,27 @@
|
||||
## buildah-version "1" "June 2017" "Buildah"
|
||||
|
||||
## NAME
|
||||
buildah version - Display the Buildah Version Information.
|
||||
|
||||
## SYNOPSIS
|
||||
**buildah version**
|
||||
[**--help**|**-h**]
|
||||
|
||||
## DESCRIPTION
|
||||
Shows the following information: Version, Go Version, Image Spec, Runtime Spec, Git Commit, Build Time, OS, and Architecture.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--help, -h**
|
||||
Print usage statement
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah version
|
||||
|
||||
buildah version --help
|
||||
|
||||
buildah version -h
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1)
|
||||
@@ -1,14 +1,14 @@
|
||||
## buildah "1" "March 2017" "buildah"
|
||||
|
||||
## NAME
|
||||
buildah - A command line tool to facilitate working with containers and using them to build images.
|
||||
Buildah - A command line tool to facilitate working with containers and using them to build images.
|
||||
|
||||
## SYNOPSIS
|
||||
buildah [OPTIONS] COMMAND [ARG...]
|
||||
|
||||
|
||||
## DESCRIPTION
|
||||
The buildah package provides a command line tool which can be used to:
|
||||
The Buildah package provides a command line tool which can be used to:
|
||||
|
||||
* Create a working container, either from scratch or using an image as a starting point.
|
||||
* Mount a working container's root filesystem for manipulation.
|
||||
@@ -16,8 +16,38 @@ The buildah package provides a command line tool which can be used to:
|
||||
* Use the updated contents of a container's root filesystem as a filesystem layer to create a new image.
|
||||
* Delete a working container or an image.
|
||||
|
||||
This tool needs to be run as the root user.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--debug**
|
||||
|
||||
Print debugging information
|
||||
|
||||
**--default-mounts-file**
|
||||
|
||||
Path to default mounts file (default path: "/usr/share/containers/mounts.conf")
|
||||
|
||||
**--help, -h**
|
||||
|
||||
Show help
|
||||
|
||||
**--registries-conf** *path*
|
||||
|
||||
Pathname of the configuration file which specifies which registries should be
|
||||
consulted when completing image names which do not include a registry or domain
|
||||
portion. It is not recommended that this option be used, as the default
|
||||
behavior of using the system-wide configuration
|
||||
(*/etc/containers/registries.conf*) is most often preferred.
|
||||
|
||||
**--registries-conf-dir** *path*
|
||||
|
||||
Pathname of the directory which contains configuration snippets which specify
|
||||
registries which should be consulted when completing image names which do not
|
||||
include a registry or domain portion. It is not recommended that this option
|
||||
be used, as the default behavior of using the system-wide configuration
|
||||
(*/etc/containers/registries.d*) is most often preferred.
|
||||
|
||||
**--root** **value**
|
||||
|
||||
Storage root dir (default: "/var/lib/containers/storage")
|
||||
@@ -34,14 +64,6 @@ Storage driver
|
||||
|
||||
Storage driver option
|
||||
|
||||
**--debug**
|
||||
|
||||
Print debugging information
|
||||
|
||||
**--help, -h**
|
||||
|
||||
Show help
|
||||
|
||||
**--version, -v**
|
||||
|
||||
Print the version
|
||||
@@ -67,3 +89,5 @@ Print the version
|
||||
| buildah-run(1) | Run a command inside of the container. |
|
||||
| buildah-tag(1) | Add an additional name to a local image. |
|
||||
| buildah-umount(1) | Unmount a working container's root file system. |
|
||||
| buildah-version(1) | Display the Buildah Version Information
|
||||
|
|
||||
|
||||
238
docs/tutorials/01-intro.md
Normal file
@@ -0,0 +1,238 @@
|
||||

|
||||
|
||||
# Buildah Tutorial 1
|
||||
## Building OCI container images
|
||||
|
||||
The purpose of this tutorial is to demonstrate how Buildah can be used to build container images compliant with the [Open Container Initiative](https://www.opencontainers.org/) (OCI) [image specification](https://github.com/opencontainers/image-spec). Images can be built from existing images, from scratch, and using Dockerfiles. OCI images built using the Buildah command line tool (CLI) and the underlying OCI based technologies (e.g. [containers/image](https://github.com/containers/image) and [containers/storage](https://github.com/containers/storage)) are portable and can therefore run in a Docker environment.
|
||||
|
||||
In brief the `containers/image` project provides mechanisms to copy, push, pull, inspect and sign container images. The `containers/storage` project provides mechanisms for storing filesystem layers, container images, and containers. Buildah is a CLI that takes advantage of these underlying projects and therefore allows you to build, move, and manage container images and containers.
|
||||
|
||||
First step is to install Buildah. Run as root because you will need to be root for running Buildah commands:
|
||||
|
||||
# dnf -y install buildah
|
||||
|
||||
After installing Buildah we can see there are no images installed. The `buildah images` command will list all the images:
|
||||
|
||||
# buildah images
|
||||
|
||||
We can also see that there are also no containers by running:
|
||||
|
||||
# buildah containers
|
||||
|
||||
When you build a working container from an existing image, Buildah defaults to appending '-working-container' to the image's name to construct a name for the container. The Buildah CLI conveniently returns the name of the new container. You can take advantage of this by assigning the returned value to a shell variable using standard shell assignment:
|
||||
|
||||
# container=$(buildah from fedora)
|
||||
|
||||
It is not required to assign a shell variable. Running `buildah from fedora` is sufficient. It just helps simplify commands later. To see the name of the container that we stored in the shell variable:
|
||||
|
||||
# echo $container
|
||||
|
||||
What can we do with this new container? Let's try running bash:
|
||||
|
||||
# buildah run $container bash
|
||||
|
||||
Notice we get a new shell prompt because we are running a bash shell inside of the container. It should be noted that `buildah run` is primarily intended for helping debug during the build process. A runtime like runc or a container interface like [CRI-O](https://github.com/kubernetes-incubator/cri-o) is more suited for starting containers in production.
|
||||
|
||||
Be sure to `exit` out of the container and let's try running something else:
|
||||
|
||||
# buildah run $container java
|
||||
|
||||
Oops. Java is not installed. A message containing something like the following was returned.
|
||||
|
||||
container_linux.go:274: starting container process caused "exec: \"java\": executable file not found in $PATH"
|
||||
|
||||
Let's try installing it using:
|
||||
|
||||
# buildah run $container -- dnf -y install java
|
||||
|
||||
The `--` syntax basically tells Buildah: there are no more `buildah run` command options after this point. The options after this point are for inside the container's shell. It is required if the command we specify includes command line options which are not meant for Buildah.
|
||||
|
||||
Now running `buildah run $container java` will show that Java has been installed. It will return the standard Java `Usage` output.
|
||||
|
||||
## Building a container from scratch
|
||||
|
||||
One of the advantages of using `buildah` to build OCI compliant container images is that you can easily build a container image from scratch and therefore exclude unnecessary packages from your image. E.g. most final container images for production probably don't need a package manager like `dnf`.
|
||||
|
||||
Let's build a container from scratch. The special "image" name "scratch" tells Buildah to create an empty container. The container has a small amount of metadata about the container but no real Linux content.
|
||||
|
||||
# newcontainer=$(buildah from scratch)
|
||||
|
||||
You can see this new empty container by running:
|
||||
|
||||
# buildah containers
|
||||
|
||||
You should see output similar to the following:
|
||||
|
||||
CONTAINER ID BUILDER IMAGE ID IMAGE NAME CONTAINER NAME
|
||||
82af3b9a9488 * 3d85fcda5754 docker.io/library/fedora:latest fedora-working-container
|
||||
ac8fa6be0f0a * scratch working-container
|
||||
|
||||
Its container name is working-container by default and it's stored in the `$newcontainer` variable. Notice the image name (IMAGE NAME) is "scratch". This just indicates that there is no real image yet. i.e. It is containers/storage but there is no representation in containers/image. So when we run:
|
||||
|
||||
# buildah images
|
||||
|
||||
We don't see the image listed. There is no corresponding scratch image. It is an empty container.
|
||||
|
||||
So does this container actually do anything? Let's see.
|
||||
|
||||
# buildah run $newcontainer bash
|
||||
|
||||
Nope. This really is empty. The package installer `dnf` is not even inside this container. It's essentially an empty layer on top of the kernel. So what can be done with that? Thankfully there is a `buildah mount` command.
|
||||
|
||||
# scratchmnt=$(buildah mount $newcontainer)
|
||||
|
||||
By echoing `$scratchmnt` we can see the path for the [overlay image](https://wiki.archlinux.org/index.php/Overlay_filesystem), which gives you a link directly to the root file system of the container.
|
||||
|
||||
# echo $scratchmnt
|
||||
/var/lib/containers/storage/overlay/b78d0e11957d15b5d1fe776293bd40a36c28825fb6cf76f407b4d0a95b2a200d/diff
|
||||
|
||||
Notice that the overlay image is under `/var/lib/containers/storage` as one would expect. (See above on `containers/storage` or for more information see [containers/storage](https://github.com/containers/storage).)
|
||||
|
||||
Now that we have a new empty container we can install or remove software packages or simply copy content into that container. So let's install `bash` and `coreutils` so that we can run bash scripts. This could easily be `nginx` or other packages needed for your container.
|
||||
|
||||
# dnf install --installroot $scratchmnt --releasever 26 bash coreutils --setopt install_weak_deps=false -y
|
||||
|
||||
Let's try it out (showing the prompt in this example to demonstrate the difference):
|
||||
|
||||
# buildah run $newcontainer bash
|
||||
bash-4.4# cd /usr/bin
|
||||
bash-4.4# ls
|
||||
bash-4.4# exit
|
||||
|
||||
Notice we have a `/usr/bin` directory in the newcontainer's image layer. Let's first copy a simple file from our host into the container. Create a file called runecho.sh which contains the following:
|
||||
|
||||
#!/bin/bash
|
||||
for i in `seq 0 9`;
|
||||
do
|
||||
echo "This is a new container from ipbabble [" $i "]"
|
||||
done
|
||||
|
||||
Change the permissions on the file so that it can be run:
|
||||
|
||||
# chmod +x runecho.sh
|
||||
|
||||
With `buildah` files can be copied into the new image and we can also configure the image to run commands. Let's copy this new command into the container's `/usr/bin` directory and configure the container to run the command when the container is run:
|
||||
|
||||
# buildah copy $newcontainer ./runecho.sh /usr/bin
|
||||
# buildah config --cmd /usr/bin/runecho.sh $newcontainer
|
||||
|
||||
Now run the container:
|
||||
|
||||
# buildah run $newcontainer
|
||||
This is a new container from ipbabble [ 0 ]
|
||||
This is a new container from ipbabble [ 1 ]
|
||||
This is a new container from ipbabble [ 2 ]
|
||||
This is a new container from ipbabble [ 3 ]
|
||||
This is a new container from ipbabble [ 4 ]
|
||||
This is a new container from ipbabble [ 5 ]
|
||||
This is a new container from ipbabble [ 6 ]
|
||||
This is a new container from ipbabble [ 7 ]
|
||||
This is a new container from ipbabble [ 8 ]
|
||||
This is a new container from ipbabble [ 9 ]
|
||||
|
||||
It works! Congratulations, you have built a new OCI container from scratch that uses bash scripting. Let's add some more configuration information.
|
||||
|
||||
# buildah config --created-by "ipbabble" $newcontainer
|
||||
# buildah config --author "wgh at redhat.com @ipbabble" --label name=fedora26-bashecho $newcontainer
|
||||
|
||||
We can inspect the container's metadata using the `inspect` command:
|
||||
|
||||
# buildah inspect $newcontainer
|
||||
|
||||
We should probably unmount and commit the image:
|
||||
|
||||
# buildah unmount $newcontainer
|
||||
# buildah commit $newcontainer fedora-bashecho
|
||||
# buildah images
|
||||
|
||||
And you can see there is a new image called `fedora-bashecho:latest`. You can inspect the new image using:
|
||||
|
||||
# buildah inspect --type=image fedora-bashecho
|
||||
|
||||
Later when you want to create a new container or containers from this image, you simply need to do `buildah from fedora-bashecho`. This will create new containers based on this image for you.
|
||||
|
||||
Now that you have the new image you can remove the scratch container called working-container:
|
||||
|
||||
# buildah rm $newcontainer
|
||||
|
||||
or
|
||||
|
||||
# buildah rm working-container
|
||||
|
||||
## OCI images built using Buildah are portable
|
||||
|
||||
Let's test if this new OCI image is really portable to another OCI technology like Docker. First you should install Docker and start it. Notice that Docker requires a daemon process (that's quite big) in order to run any client commands. Buildah has no daemon requirement.
|
||||
|
||||
# dnf -y install docker
|
||||
# systemctl start docker
|
||||
|
||||
Let's copy that image from where containers/storage stores it to where the Docker daemon stores its images, so that we can run it using Docker. We can achieve this using `buildah push`. This copies the image to Docker's repository area which is located under `/var/lib/docker`. Docker's repository is managed by the Docker daemon. This needs to be explicitly stated by telling Buildah to push to the Docker repository protocol using `docker-daemon:`.
|
||||
|
||||
# buildah push fedora-bashecho docker-daemon:fedora-bashecho:latest
|
||||
|
||||
Under the covers, the containers/image library calls into the containers/storage library to read the image's contents, and sends them to the local Docker daemon. This can take a little while. And usually you won't need to do this. If you're using `buildah` you are probably not using Docker. This is just for demo purposes. Let's try it:
|
||||
|
||||
# docker run fedora-bashecho
|
||||
This is a new container from ipbabble [ 0 ]
|
||||
This is a new container from ipbabble [ 1 ]
|
||||
This is a new container from ipbabble [ 2 ]
|
||||
This is a new container from ipbabble [ 3 ]
|
||||
This is a new container from ipbabble [ 4 ]
|
||||
This is a new container from ipbabble [ 5 ]
|
||||
This is a new container from ipbabble [ 6 ]
|
||||
This is a new container from ipbabble [ 7 ]
|
||||
This is a new container from ipbabble [ 8 ]
|
||||
This is a new container from ipbabble [ 9 ]
|
||||
|
||||
OCI container images built with `buildah` are completely standard as expected. So now it might be time to run:
|
||||
|
||||
# dnf -y remove docker
|
||||
|
||||
## Using Dockerfiles with Buildah
|
||||
|
||||
What if you have been using Docker for a while and have some existing Dockerfiles. Not a problem. Buildah can build images using a Dockerfile. The `build-using-dockerfile`, or `bud` for short, takes a Dockerfile as input and produces an OCI image.
|
||||
|
||||
Find one of your Dockerfiles or create a file called Dockerfile. Use the following example or some variation if you'd like:
|
||||
|
||||
# Base on the Fedora
|
||||
FROM fedora:latest
|
||||
MAINTAINER ipbabble email buildahboy@redhat.com # not a real email
|
||||
|
||||
# Update image and install httpd
|
||||
RUN echo "Updating all fedora packages"; dnf -y update; dnf -y clean all
|
||||
RUN echo "Installing httpd"; dnf -y install httpd
|
||||
|
||||
# Expose the default httpd port 80
|
||||
EXPOSE 80
|
||||
|
||||
# Run the httpd
|
||||
CMD ["/usr/sbin/httpd", "-DFOREGROUND"]
|
||||
|
||||
Now run `buildah bud` with the name of the Dockerfile and the name to be given to the created image (e.g. fedora-httpd):
|
||||
|
||||
# buildah bud -f Dockerfile -t fedora-httpd
|
||||
|
||||
or, because `buildah bud` defaults to Dockerfile (note the period at the end of the example):
|
||||
|
||||
# buildah bud -t fedora-httpd .
|
||||
|
||||
You will see all the steps of the Dockerfile executing. Afterwards `buildah images` will show you the new image. Now we need to create the container using `buildah from` and test it with `buildah run`:
|
||||
|
||||
# httpcontainer=$(buildah from fedora-httpd)
|
||||
# buildah run $httpcontainer
|
||||
|
||||
While that container is running, in another shell run:
|
||||
|
||||
# curl localhost
|
||||
|
||||
You will see the standard Apache webpage.
|
||||
|
||||
Why not try and modify the Dockerfile. Do not install httpd, but instead ADD the runecho.sh file and have it run as the CMD.
|
||||
|
||||
## Congratulations
|
||||
|
||||
Well done. You have learned a lot about Buildah using this short tutorial. Hopefully you followed along with the examples and found them to be sufficient. Be sure to look at Buildah's man pages to see the other useful commands you can use. Have fun playing.
|
||||
|
||||
If you have any suggestions or issues please post them at the [ProjectAtomic Buildah Issues page](https://github.com/projectatomic/buildah/issues).
|
||||
|
||||
For more information on Buildah and how you might contribute please visit the [Buildah home page on Github](https://github.com/projectatomic/buildah).
|
||||
134
docs/tutorials/02-registries-repositories.md
Normal file
@@ -0,0 +1,134 @@
|
||||

|
||||
|
||||
# Buildah Tutorial 2
|
||||
## Using Buildah with container registries
|
||||
|
||||
The purpose of this tutorial is to demonstrate how Buildah can be used to move OCI compliant images in and out of private or public registries.
|
||||
|
||||
In the [first tutorial](https://github.com/projectatomic/buildah/blob/master/docs/tutorials/01-intro.md) we built an image from scratch that we called `fedora-bashecho` and we pushed it to a local Docker repository using the `docker-daemon` protocol. We are going to use the same image to push to a private Docker registry.
|
||||
|
||||
First we must pull down a registry. As a shortcut we will save the container name that is returned from the `buildah from` command, into a bash variable called `registry`. This is just like we did in Tutorial 1:
|
||||
|
||||
# registry=$(buildah from registry)
|
||||
|
||||
It is worth pointing out that the `from` command can also use other protocols beyond the default (and implicity assumed) order that first looks in local containers-storage (containers-storage:) and then looks in the Docker hub (docker:). For example, if you already had a registry container image in a local Docker registry then you could use the following:
|
||||
|
||||
# registry=$(buildah from docker-daemon:registry:latest)
|
||||
|
||||
Then we need to start the registry. You should start the registry in a separate shell and leave it running there:
|
||||
|
||||
# buildah run $registry
|
||||
|
||||
If you would like to see more details as to what is going on inside the registry, especially if you are having problems with the registry, you can run the registry container in debug mode as follows:
|
||||
|
||||
# buildah --debug run $registry
|
||||
|
||||
You can use `--debug` on any Buildah command.
|
||||
|
||||
The registry is running and is waiting for requests to process. Notice that this registry is a Docker registry that we pulled from Docker hub and we are running it for this example using `buildah run`. There is no Docker daemon running at this time.
|
||||
|
||||
Let's push our image to the private registry. By default, Buildah is set up to expect secure connections to a registry. Therefore we will need to turn the TLS verification off using the `--tls-verify` flag. We also need to tell Buildah that the registry is on this local host ( i.e. localhost) and listening on port 5000. Similar to what you'd expect to do on multi-tenant Docker hub, we will explicitly specify that the registry is to store the image under the `ipbabble` repository - so as not to clash with other users' similarly named images.
|
||||
|
||||
# buildah push --tls-verify=false fedora-bashecho docker://localhost:5000/ipbabble/fedora-bashecho:latest
|
||||
|
||||
[Skopeo](https://github.com/projectatomic/skopeo) is a ProjectAtomic tool that was created to inspect images in registries without having to pull the image from the registry. It has grown to have many other uses. We will verify that the image has been stored by using Skopeo to inspect the image in the registry:
|
||||
|
||||
# skopeo inspect --tls-verify=false docker://localhost:5000/ipbabble/fedora-bashecho:latest
|
||||
{
|
||||
"Name": "localhost:5000/ipbabble/fedora-bashecho",
|
||||
"Digest": "sha256:6806f9385f97bc09f54b5c0ef583e58c3bc906c8c0b3e693d8782d0a0acf2137",
|
||||
"RepoTags": [
|
||||
"latest"
|
||||
],
|
||||
"Created": "2017-12-05T21:38:12.311901938Z",
|
||||
"DockerVersion": "",
|
||||
"Labels": {
|
||||
"name": "fedora-bashecho"
|
||||
},
|
||||
"Architecture": "amd64",
|
||||
"Os": "linux",
|
||||
"Layers": [
|
||||
"sha256:0cb7556c714767b8da6e0299cbeab765abaddede84769475c023785ae66d10ca"
|
||||
]
|
||||
}
|
||||
|
||||
We can verify that it is still portable with Docker by starting Docker again, as we did in the first tutorial. Then we can pull down the image and starting the container using Docker:
|
||||
|
||||
# systemctl start docker
|
||||
# docker pull localhost:5000/ipbabble/fedora-bashecho
|
||||
Using default tag: latest
|
||||
Trying to pull repository localhost:5000/ipbabble/fedora-bashecho ...
|
||||
sha256:6806f9385f97bc09f54b5c0ef583e58c3bc906c8c0b3e693d8782d0a0acf2137: Pulling from localhost:5000/ipbabble/fedora-bashecho
|
||||
0cb7556c7147: Pull complete
|
||||
Digest: sha256:6806f9385f97bc09f54b5c0ef583e58c3bc906c8c0b3e693d8782d0a0acf2137
|
||||
Status: Downloaded newer image for localhost:5000/ipbabble/fedora-bashecho:latest
|
||||
|
||||
# docker run localhost:5000/ipbabble/fedora-bashecho
|
||||
This is a new container named ipbabble [ 0 ]
|
||||
This is a new container named ipbabble [ 1 ]
|
||||
This is a new container named ipbabble [ 2 ]
|
||||
This is a new container named ipbabble [ 3 ]
|
||||
This is a new container named ipbabble [ 4 ]
|
||||
This is a new container named ipbabble [ 5 ]
|
||||
This is a new container named ipbabble [ 6 ]
|
||||
This is a new container named ipbabble [ 7 ]
|
||||
This is a new container named ipbabble [ 8 ]
|
||||
This is a new container named ipbabble [ 9 ]
|
||||
# systemctl stop docker
|
||||
|
||||
Pushing to Docker hub is just as easy. Of course you must have an account with credentials. In this example I'm using a Docker hub API key, which has the form "username:password" (example password has been edited for privacy), that I created with my Docker hub account. I use the `--creds` flag to use my API key. I also specify my local image name `fedora-bashecho` as my image source and I use the `docker` protocol with no host or port so that it will look at the default Docker hub registry:
|
||||
|
||||
# buildah push --creds ipbabble:5bbb9990-6eeb-1234-af1a-aaa80066887c fedora-bashecho docker://ipbabble/fedora-bashecho:latest
|
||||
|
||||
And let's inspect that with Skopeo:
|
||||
|
||||
# skopeo inspect --creds ipbabble:5bbb9990-6eeb-1234-af1a-aaa80066887c docker://ipbabble/fedora-bashecho:latest
|
||||
{
|
||||
"Name": "docker.io/ipbabble/fedora-bashecho",
|
||||
"Digest": "sha256:6806f9385f97bc09f54b5c0ef583e58c3bc906c8c0b3e693d8782d0a0acf2137",
|
||||
"RepoTags": [
|
||||
"latest"
|
||||
],
|
||||
"Created": "2017-12-05T21:38:12.311901938Z",
|
||||
"DockerVersion": "",
|
||||
"Labels": {
|
||||
"name": "fedora-bashecho"
|
||||
},
|
||||
"Architecture": "amd64",
|
||||
"Os": "linux",
|
||||
"Layers": [
|
||||
"sha256:0cb7556c714767b8da6e0299cbeab765abaddede84769475c023785ae66d10ca"
|
||||
]
|
||||
}
|
||||
|
||||
We can use Buildah to pull down the image using the `buildah from` command. But before we do let's clean up our local containers-storage so that we don't have an existing fedora-bashecho - otherwise Buildah will know it already exists and not bother pulling it down.
|
||||
|
||||
# buildah images
|
||||
IMAGE ID IMAGE NAME CREATED AT SIZE
|
||||
d4cd7d73ee42 docker.io/library/registry:latest Dec 1, 2017 22:15 31.74 MB
|
||||
e31b0f0b0a63 docker.io/library/fedora-bashecho:latest Dec 5, 2017 21:38 772 B
|
||||
# buildah rmi fedora-bashecho
|
||||
untagged: docker.io/library/fedora-bashecho:latest
|
||||
e31b0f0b0a63e94c5a558d438d7490fab930a282a4736364360ab9b92cb25f3a
|
||||
# buildah images
|
||||
IMAGE ID IMAGE NAME CREATED AT SIZE
|
||||
d4cd7d73ee42 docker.io/library/registry:latest Dec 1, 2017 22:15 31.74 MB
|
||||
|
||||
Okay, so we don't have a fedora-bashecho anymore. Let's pull the image from Docker hub:
|
||||
|
||||
# buildah from ipbabble/fedora-bashecho
|
||||
|
||||
If you don't want to bother doing the remove image step (`rmi`) you can use the flag `--pull-always` to force the image to be pulled again and overwrite any corresponding local image.
|
||||
|
||||
Now check that image is in the local containers-storage:
|
||||
|
||||
# buildah images
|
||||
IMAGE ID IMAGE NAME CREATED AT SIZE
|
||||
d4cd7d73ee42 docker.io/library/registry:latest Dec 1, 2017 22:15 31.74 MB
|
||||
864871ac1c45 docker.io/ipbabble/fedora-bashecho:latest Dec 5, 2017 21:38 315.4 MB
|
||||
|
||||
Success!
|
||||
|
||||
If you have any suggestions or issues please post them at the [ProjectAtomic Buildah Issues page](https://github.com/projectatomic/buildah/issues).
|
||||
|
||||
For more information on Buildah and how you might contribute please visit the [Buildah home page on Github](https://github.com/projectatomic/buildah).
|
||||
16
docs/tutorials/README.md
Normal file
@@ -0,0 +1,16 @@
|
||||

|
||||
|
||||
# Buildah Tutorials
|
||||
|
||||
## Links to a number of useful tutorials for the Buildah project.
|
||||
|
||||
**[Introduction Tutorial](https://github.com/projectatomic/buildah/tree/master/docs/tutorials/01-intro.md)**
|
||||
|
||||
Learn how to build container images compliant with the [Open Container Initiative](https://www.opencontainers.org/) (OCI) [image specification](https://github.com/opencontainers/image-spec) using Buildah.
|
||||
|
||||
|
||||
**[Buildah and Registries Tutorial](https://github.com/projectatomic/buildah/tree/master/docs/tutorials/02-registries-repositories.md)**
|
||||
|
||||
Learn how Buildah can be used to move OCI compliant images in and out of private or public registries.
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ echo yay > $mountpoint1/file-in-root
|
||||
read
|
||||
: "[1m Produce an image from the container [0m"
|
||||
read
|
||||
buildah commit "$container1" containers-storage:${2:-first-new-image}
|
||||
buildah commit "$container1" ${2:-first-new-image}
|
||||
read
|
||||
: "[1m Verify that our new image is there [0m"
|
||||
read
|
||||
|
||||
17
examples/lighttpd.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash -x
|
||||
|
||||
ctr1=`buildah from ${1:-fedora}`
|
||||
|
||||
## Get all updates and install our minimal httpd server
|
||||
buildah run $ctr1 -- dnf update -y
|
||||
buildah run $ctr1 -- dnf install -y lighttpd
|
||||
|
||||
## Include some buildtime annotations
|
||||
buildah config --annotation "com.example.build.host=$(uname -n)" $ctr1
|
||||
|
||||
## Run our server and expose the port
|
||||
buildah config $ctr1 --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf"
|
||||
buildah config $ctr1 --port 80
|
||||
|
||||
## Commit this container to an image name
|
||||
buildah commit $ctr1 ${2:-$USER/lighttpd}
|
||||
219
image.go
@@ -2,6 +2,7 @@ package buildah
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -9,10 +10,8 @@ import (
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/image"
|
||||
"github.com/containers/image/manifest"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
@@ -23,6 +22,7 @@ import (
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah/docker"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -42,7 +42,6 @@ type containerImageRef struct {
|
||||
name reference.Named
|
||||
names []string
|
||||
layerID string
|
||||
addHistory bool
|
||||
oconfig []byte
|
||||
dconfig []byte
|
||||
created time.Time
|
||||
@@ -58,7 +57,6 @@ type containerImageSource struct {
|
||||
store storage.Store
|
||||
layerID string
|
||||
names []string
|
||||
addHistory bool
|
||||
compression archive.Compression
|
||||
config []byte
|
||||
configDigest digest.Digest
|
||||
@@ -67,33 +65,37 @@ type containerImageSource struct {
|
||||
exporting bool
|
||||
}
|
||||
|
||||
func (i *containerImageRef) NewImage(sc *types.SystemContext) (types.Image, error) {
|
||||
src, err := i.NewImageSource(sc, nil)
|
||||
func (i *containerImageRef) NewImage(sc *types.SystemContext) (types.ImageCloser, error) {
|
||||
src, err := i.NewImageSource(sc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return image.FromSource(src)
|
||||
return image.FromSource(sc, src)
|
||||
}
|
||||
|
||||
func selectManifestType(preferred string, acceptable, supported []string) string {
|
||||
selected := preferred
|
||||
for _, accept := range acceptable {
|
||||
if preferred == accept {
|
||||
return preferred
|
||||
}
|
||||
for _, support := range supported {
|
||||
if accept == support {
|
||||
selected = accept
|
||||
}
|
||||
func expectedOCIDiffIDs(image v1.Image) int {
|
||||
expected := 0
|
||||
for _, history := range image.History {
|
||||
if !history.EmptyLayer {
|
||||
expected = expected + 1
|
||||
}
|
||||
}
|
||||
return selected
|
||||
return expected
|
||||
}
|
||||
|
||||
func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestTypes []string) (src types.ImageSource, err error) {
|
||||
func expectedDockerDiffIDs(image docker.V2Image) int {
|
||||
expected := 0
|
||||
for _, history := range image.History {
|
||||
if !history.EmptyLayer {
|
||||
expected = expected + 1
|
||||
}
|
||||
}
|
||||
return expected
|
||||
}
|
||||
|
||||
func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.ImageSource, err error) {
|
||||
// Decide which type of manifest and configuration output we're going to provide.
|
||||
supportedManifestTypes := []string{v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest}
|
||||
manifestType := selectManifestType(i.preferredManifestType, manifestTypes, supportedManifestTypes)
|
||||
manifestType := i.preferredManifestType
|
||||
// If it's not a format we support, return an error.
|
||||
if manifestType != v1.MediaTypeImageManifest && manifestType != docker.V2S2MediaTypeManifest {
|
||||
return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
|
||||
@@ -143,11 +145,14 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestType
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
created := i.created
|
||||
oimage.Created = &created
|
||||
dimage := docker.V2Image{}
|
||||
err = json.Unmarshal(i.dconfig, &dimage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dimage.Created = created
|
||||
|
||||
// Start building manifests.
|
||||
omanifest := v1.Manifest{
|
||||
@@ -172,16 +177,46 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestType
|
||||
}
|
||||
|
||||
oimage.RootFS.Type = docker.TypeLayers
|
||||
oimage.RootFS.DiffIDs = []string{}
|
||||
oimage.RootFS.DiffIDs = []digest.Digest{}
|
||||
dimage.RootFS = &docker.V2S2RootFS{}
|
||||
dimage.RootFS.Type = docker.TypeLayers
|
||||
dimage.RootFS.DiffIDs = []digest.Digest{}
|
||||
|
||||
// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
|
||||
for _, layerID := range layers {
|
||||
// The default layer media type assumes no compression.
|
||||
omediaType := v1.MediaTypeImageLayer
|
||||
dmediaType := docker.V2S2MediaTypeUncompressedLayer
|
||||
// Figure out which media type we want to call this. Assume no compression.
|
||||
// If we're not re-exporting the data, reuse the blobsum and diff IDs.
|
||||
if !i.exporting && layerID != i.layerID {
|
||||
layer, err2 := i.store.Layer(layerID)
|
||||
if err2 != nil {
|
||||
return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
|
||||
}
|
||||
if layer.UncompressedDigest == "" {
|
||||
return nil, errors.Errorf("unable to look up size of layer %q", layerID)
|
||||
}
|
||||
layerBlobSum := layer.UncompressedDigest
|
||||
layerBlobSize := layer.UncompressedSize
|
||||
// Note this layer in the manifest, using the uncompressed blobsum.
|
||||
olayerDescriptor := v1.Descriptor{
|
||||
MediaType: omediaType,
|
||||
Digest: layerBlobSum,
|
||||
Size: layerBlobSize,
|
||||
}
|
||||
omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
|
||||
dlayerDescriptor := docker.V2S2Descriptor{
|
||||
MediaType: dmediaType,
|
||||
Digest: layerBlobSum,
|
||||
Size: layerBlobSize,
|
||||
}
|
||||
dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
|
||||
// Note this layer in the list of diffIDs, again using the uncompressed blobsum.
|
||||
oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, layerBlobSum)
|
||||
dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, layerBlobSum)
|
||||
continue
|
||||
}
|
||||
// Figure out if we need to change the media type, in case we're using compression.
|
||||
if i.compression != archive.Uncompressed {
|
||||
switch i.compression {
|
||||
case archive.Gzip:
|
||||
@@ -192,50 +227,26 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestType
|
||||
// Until the image specs define a media type for bzip2-compressed layers, even if we know
|
||||
// how to decompress them, we can't try to compress layers with bzip2.
|
||||
return nil, errors.New("media type for bzip2-compressed layers is not defined")
|
||||
case archive.Xz:
|
||||
// Until the image specs define a media type for xz-compressed layers, even if we know
|
||||
// how to decompress them, we can't try to compress layers with xz.
|
||||
return nil, errors.New("media type for xz-compressed layers is not defined")
|
||||
default:
|
||||
logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID)
|
||||
}
|
||||
}
|
||||
// If we're not re-exporting the data, just fake up layer and diff IDs for the manifest.
|
||||
if !i.exporting {
|
||||
fakeLayerDigest := digest.NewDigestFromHex(digest.Canonical.String(), layerID)
|
||||
// Add a note in the manifest about the layer. The blobs should be identified by their
|
||||
// possibly-compressed blob digests, but just use the layer IDs here.
|
||||
olayerDescriptor := v1.Descriptor{
|
||||
MediaType: omediaType,
|
||||
Digest: fakeLayerDigest,
|
||||
Size: -1,
|
||||
}
|
||||
omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
|
||||
dlayerDescriptor := docker.V2S2Descriptor{
|
||||
MediaType: dmediaType,
|
||||
Digest: fakeLayerDigest,
|
||||
Size: -1,
|
||||
}
|
||||
dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
|
||||
// Add a note about the diffID, which should be uncompressed digest of the blob, but
|
||||
// just use the layer ID here.
|
||||
oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, fakeLayerDigest.String())
|
||||
dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, fakeLayerDigest)
|
||||
continue
|
||||
}
|
||||
// Start reading the layer.
|
||||
rc, err := i.store.Diff("", layerID)
|
||||
noCompression := archive.Uncompressed
|
||||
diffOptions := &storage.DiffOptions{
|
||||
Compression: &noCompression,
|
||||
}
|
||||
rc, err := i.store.Diff("", layerID, diffOptions)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error extracting layer %q", layerID)
|
||||
}
|
||||
defer rc.Close()
|
||||
// Set up to decompress the layer, in case it's coming out compressed. Due to implementation
|
||||
// differences, the result may not match the digest the blob had when it was originally imported,
|
||||
// so we have to recompute all of this anyway if we want to be sure the digests we use will be
|
||||
// correct.
|
||||
uncompressed, err := archive.DecompressStream(rc)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error decompressing layer %q", layerID)
|
||||
}
|
||||
defer uncompressed.Close()
|
||||
srcHasher := digest.Canonical.Digester()
|
||||
reader := io.TeeReader(uncompressed, srcHasher.Hash())
|
||||
reader := io.TeeReader(rc, srcHasher.Hash())
|
||||
// Set up to write the possibly-recompressed blob.
|
||||
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
@@ -244,7 +255,7 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestType
|
||||
destHasher := digest.Canonical.Digester()
|
||||
counter := ioutils.NewWriteCounter(layerFile)
|
||||
multiWriter := io.MultiWriter(counter, destHasher.Hash())
|
||||
// Compress the layer, if we're compressing it.
|
||||
// Compress the layer, if we're recompressing it.
|
||||
writer, err := archive.CompressStream(multiWriter, i.compression)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error compressing layer %q", layerID)
|
||||
@@ -282,27 +293,36 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestType
|
||||
Size: size,
|
||||
}
|
||||
dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
|
||||
// Add a note about the diffID, which is always an uncompressed value.
|
||||
oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest().String())
|
||||
// Add a note about the diffID, which is always the layer's uncompressed digest.
|
||||
oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest())
|
||||
dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest())
|
||||
}
|
||||
|
||||
if i.addHistory {
|
||||
// Build history notes in the image configurations.
|
||||
onews := v1.History{
|
||||
Created: i.created,
|
||||
CreatedBy: i.createdBy,
|
||||
Author: oimage.Author,
|
||||
EmptyLayer: false,
|
||||
}
|
||||
oimage.History = append(oimage.History, onews)
|
||||
dnews := docker.V2S2History{
|
||||
Created: i.created,
|
||||
CreatedBy: i.createdBy,
|
||||
Author: dimage.Author,
|
||||
EmptyLayer: false,
|
||||
}
|
||||
dimage.History = append(dimage.History, dnews)
|
||||
// Build history notes in the image configurations.
|
||||
onews := v1.History{
|
||||
Created: &i.created,
|
||||
CreatedBy: i.createdBy,
|
||||
Author: oimage.Author,
|
||||
EmptyLayer: false,
|
||||
}
|
||||
oimage.History = append(oimage.History, onews)
|
||||
dnews := docker.V2S2History{
|
||||
Created: i.created,
|
||||
CreatedBy: i.createdBy,
|
||||
Author: dimage.Author,
|
||||
EmptyLayer: false,
|
||||
}
|
||||
dimage.History = append(dimage.History, dnews)
|
||||
|
||||
// Sanity check that we didn't just create a mismatch between non-empty layers in the
|
||||
// history and the number of diffIDs.
|
||||
expectedDiffIDs := expectedOCIDiffIDs(oimage)
|
||||
if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
|
||||
return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
|
||||
}
|
||||
expectedDiffIDs = expectedDockerDiffIDs(dimage)
|
||||
if len(dimage.RootFS.DiffIDs) != expectedDiffIDs {
|
||||
return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
|
||||
}
|
||||
|
||||
// Encode the image configuration blob.
|
||||
@@ -362,7 +382,6 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestType
|
||||
store: i.store,
|
||||
layerID: i.layerID,
|
||||
names: i.names,
|
||||
addHistory: i.addHistory,
|
||||
compression: i.compression,
|
||||
config: config,
|
||||
configDigest: digest.Canonical.FromBytes(config),
|
||||
@@ -417,16 +436,22 @@ func (i *containerImageSource) Reference() types.ImageReference {
|
||||
return i.ref
|
||||
}
|
||||
|
||||
func (i *containerImageSource) GetSignatures() ([][]byte, error) {
|
||||
func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
|
||||
if instanceDigest != nil && *instanceDigest != digest.FromBytes(i.manifest) {
|
||||
return nil, errors.Errorf("TODO")
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (i *containerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
|
||||
return []byte{}, "", errors.Errorf("TODO")
|
||||
func (i *containerImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) {
|
||||
if instanceDigest != nil && *instanceDigest != digest.FromBytes(i.manifest) {
|
||||
return nil, "", errors.Errorf("TODO")
|
||||
}
|
||||
return i.manifest, i.manifestType, nil
|
||||
}
|
||||
|
||||
func (i *containerImageSource) GetManifest() ([]byte, string, error) {
|
||||
return i.manifest, i.manifestType, nil
|
||||
func (i *containerImageSource) LayerInfosForCopy() []types.BlobInfo {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *containerImageSource) GetBlob(blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) {
|
||||
@@ -460,10 +485,14 @@ func (i *containerImageSource) GetBlob(blob types.BlobInfo) (reader io.ReadClose
|
||||
return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
|
||||
}
|
||||
|
||||
func (b *Builder) makeImageRef(manifestType string, exporting, addHistory bool, compress archive.Compression, names []string, layerID string, historyTimestamp *time.Time) (types.ImageReference, error) {
|
||||
func (b *Builder) makeImageRef(manifestType string, exporting bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
|
||||
var name reference.Named
|
||||
if len(names) > 0 {
|
||||
if parsed, err := reference.ParseNamed(names[0]); err == nil {
|
||||
container, err := b.store.Container(b.ContainerID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error locating container %q", b.ContainerID)
|
||||
}
|
||||
if len(container.Names) > 0 {
|
||||
if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil {
|
||||
name = parsed
|
||||
}
|
||||
}
|
||||
@@ -486,9 +515,8 @@ func (b *Builder) makeImageRef(manifestType string, exporting, addHistory bool,
|
||||
store: b.store,
|
||||
compression: compress,
|
||||
name: name,
|
||||
names: names,
|
||||
layerID: layerID,
|
||||
addHistory: addHistory,
|
||||
names: container.Names,
|
||||
layerID: container.LayerID,
|
||||
oconfig: oconfig,
|
||||
dconfig: dconfig,
|
||||
created: created,
|
||||
@@ -499,18 +527,3 @@ func (b *Builder) makeImageRef(manifestType string, exporting, addHistory bool,
|
||||
}
|
||||
return ref, nil
|
||||
}
|
||||
|
||||
func (b *Builder) makeContainerImageRef(manifestType string, exporting bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
|
||||
if manifestType == "" {
|
||||
manifestType = OCIv1ImageManifest
|
||||
}
|
||||
container, err := b.store.Container(b.ContainerID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error locating container %q", b.ContainerID)
|
||||
}
|
||||
return b.makeImageRef(manifestType, exporting, true, compress, container.Names, container.LayerID, historyTimestamp)
|
||||
}
|
||||
|
||||
func (b *Builder) makeImageImageRef(compress archive.Compression, names []string, layerID string, historyTimestamp *time.Time) (types.ImageReference, error) {
|
||||
return b.makeImageRef(manifest.GuessMIMEType(b.Manifest), true, false, compress, names, layerID, historyTimestamp)
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
@@ -22,6 +21,7 @@ import (
|
||||
"github.com/openshift/imagebuilder"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -51,8 +51,13 @@ type BuildOptions struct {
|
||||
PullPolicy int
|
||||
// Registry is a value which is prepended to the image's name, if it
|
||||
// needs to be pulled and the image name alone can not be resolved to a
|
||||
// reference to a source image.
|
||||
// reference to a source image. No separator is implicitly added.
|
||||
Registry string
|
||||
// Transport is a value which is prepended to the image's name, if it
|
||||
// needs to be pulled and the image name alone, or the image name and
|
||||
// the registry together, can not be resolved to a reference to a
|
||||
// source image. No separator is implicitly added.
|
||||
Transport string
|
||||
// IgnoreUnrecognizedInstructions tells us to just log instructions we
|
||||
// don't recognize, and try to keep going.
|
||||
IgnoreUnrecognizedInstructions bool
|
||||
@@ -98,6 +103,8 @@ type BuildOptions struct {
|
||||
// configuration data.
|
||||
// Accepted values are OCIv1ImageFormat and Dockerv2ImageFormat.
|
||||
OutputFormat string
|
||||
// SystemContext holds parameters used for authentication.
|
||||
SystemContext *types.SystemContext
|
||||
}
|
||||
|
||||
// Executor is a buildah-based implementation of the imagebuilder.Executor
|
||||
@@ -108,6 +115,7 @@ type Executor struct {
|
||||
builder *buildah.Builder
|
||||
pullPolicy int
|
||||
registry string
|
||||
transport string
|
||||
ignoreUnrecognizedInstructions bool
|
||||
quiet bool
|
||||
runtime string
|
||||
@@ -130,14 +138,6 @@ type Executor struct {
|
||||
reportWriter io.Writer
|
||||
}
|
||||
|
||||
func makeSystemContext(signaturePolicyPath string) *types.SystemContext {
|
||||
sc := &types.SystemContext{}
|
||||
if signaturePolicyPath != "" {
|
||||
sc.SignaturePolicyPath = signaturePolicyPath
|
||||
}
|
||||
return sc
|
||||
}
|
||||
|
||||
// Preserve informs the executor that from this point on, it needs to ensure
|
||||
// that only COPY and ADD instructions can modify the contents of this
|
||||
// directory or anything below it.
|
||||
@@ -153,7 +153,15 @@ func (b *Executor) Preserve(path string) error {
|
||||
logrus.Debugf("PRESERVE %q", path)
|
||||
if b.volumes.Covers(path) {
|
||||
// This path is already a subdirectory of a volume path that
|
||||
// we're already preserving, so there's nothing new to be done.
|
||||
// we're already preserving, so there's nothing new to be done
|
||||
// except ensure that it exists.
|
||||
archivedPath := filepath.Join(b.mountPoint, path)
|
||||
if err := os.MkdirAll(archivedPath, 0755); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
|
||||
}
|
||||
if err := b.volumeCacheInvalidate(path); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring volume path %q is preserved", archivedPath)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Figure out where the cache for this volume would be stored.
|
||||
@@ -166,9 +174,15 @@ func (b *Executor) Preserve(path string) error {
|
||||
// Save info about the top level of the location that we'll be archiving.
|
||||
archivedPath := filepath.Join(b.mountPoint, path)
|
||||
st, err := os.Stat(archivedPath)
|
||||
if os.IsNotExist(err) {
|
||||
if err = os.MkdirAll(archivedPath, 0755); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
|
||||
}
|
||||
st, err = os.Stat(archivedPath)
|
||||
}
|
||||
if err != nil {
|
||||
logrus.Debugf("error reading info about %q: %v", archivedPath, err)
|
||||
return err
|
||||
return errors.Wrapf(err, "error reading info about volume path %q", archivedPath)
|
||||
}
|
||||
b.volumeCacheInfo[path] = st
|
||||
if !b.volumes.Add(path) {
|
||||
@@ -241,6 +255,9 @@ func (b *Executor) volumeCacheSave() error {
|
||||
if !os.IsNotExist(err) {
|
||||
return errors.Wrapf(err, "error checking for cache of %q in %q", archivedPath, cacheFile)
|
||||
}
|
||||
if err := os.MkdirAll(archivedPath, 0755); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
|
||||
}
|
||||
logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
|
||||
cache, err := os.Create(cacheFile)
|
||||
if err != nil {
|
||||
@@ -273,7 +290,7 @@ func (b *Executor) volumeCacheRestore() error {
|
||||
if err := os.RemoveAll(archivedPath); err != nil {
|
||||
return errors.Wrapf(err, "error clearing volume path %q", archivedPath)
|
||||
}
|
||||
if err := os.MkdirAll(archivedPath, 0700); err != nil {
|
||||
if err := os.MkdirAll(archivedPath, 0755); err != nil {
|
||||
return errors.Wrapf(err, "error recreating volume path %q", archivedPath)
|
||||
}
|
||||
err = archive.Untar(cache, archivedPath, nil)
|
||||
@@ -311,7 +328,7 @@ func (b *Executor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
|
||||
sources = append(sources, filepath.Join(b.contextDir, src))
|
||||
}
|
||||
}
|
||||
if err := b.builder.Add(copy.Dest, copy.Download, sources...); err != nil {
|
||||
if err := b.builder.Add(copy.Dest, copy.Download, buildah.AddAndCopyOptions{}, sources...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -350,6 +367,7 @@ func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error {
|
||||
Entrypoint: config.Entrypoint,
|
||||
Cmd: config.Cmd,
|
||||
NetworkDisabled: config.NetworkDisabled,
|
||||
Quiet: b.quiet,
|
||||
}
|
||||
|
||||
args := run.Args
|
||||
@@ -371,12 +389,23 @@ func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error {
|
||||
// UnrecognizedInstruction is called when we encounter an instruction that the
|
||||
// imagebuilder parser didn't understand.
|
||||
func (b *Executor) UnrecognizedInstruction(step *imagebuilder.Step) error {
|
||||
if !b.ignoreUnrecognizedInstructions {
|
||||
logrus.Debugf("+(UNIMPLEMENTED?) %#v", step)
|
||||
err_str := fmt.Sprintf("Build error: Unknown instruction: %q ", step.Command)
|
||||
err := fmt.Sprintf(err_str+"%#v", step)
|
||||
if b.ignoreUnrecognizedInstructions {
|
||||
logrus.Debugf(err)
|
||||
return nil
|
||||
}
|
||||
logrus.Errorf("+(UNIMPLEMENTED?) %#v", step)
|
||||
return errors.Errorf("Unrecognized instruction: %#v", step)
|
||||
|
||||
switch logrus.GetLevel() {
|
||||
case logrus.ErrorLevel:
|
||||
logrus.Errorf(err_str)
|
||||
case logrus.DebugLevel:
|
||||
logrus.Debugf(err)
|
||||
default:
|
||||
logrus.Errorf("+(UNHANDLED LOGLEVEL) %#v", step)
|
||||
}
|
||||
|
||||
return errors.Errorf(err)
|
||||
}
|
||||
|
||||
// NewExecutor creates a new instance of the imagebuilder.Executor interface.
|
||||
@@ -386,6 +415,7 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
|
||||
contextDir: options.ContextDirectory,
|
||||
pullPolicy: options.PullPolicy,
|
||||
registry: options.Registry,
|
||||
transport: options.Transport,
|
||||
ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
|
||||
quiet: options.Quiet,
|
||||
runtime: options.Runtime,
|
||||
@@ -396,7 +426,7 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
|
||||
outputFormat: options.OutputFormat,
|
||||
additionalTags: options.AdditionalTags,
|
||||
signaturePolicyPath: options.SignaturePolicyPath,
|
||||
systemContext: makeSystemContext(options.SignaturePolicyPath),
|
||||
systemContext: options.SystemContext,
|
||||
volumeCache: make(map[string]string),
|
||||
volumeCacheInfo: make(map[string]os.FileInfo),
|
||||
log: options.Log,
|
||||
@@ -441,8 +471,10 @@ func (b *Executor) Prepare(ib *imagebuilder.Builder, node *parser.Node, from str
|
||||
FromImage: from,
|
||||
PullPolicy: b.pullPolicy,
|
||||
Registry: b.registry,
|
||||
Transport: b.transport,
|
||||
SignaturePolicyPath: b.signaturePolicyPath,
|
||||
ReportWriter: b.reportWriter,
|
||||
SystemContext: b.systemContext,
|
||||
}
|
||||
builder, err := buildah.NewBuilder(b.store, builderOptions)
|
||||
if err != nil {
|
||||
@@ -489,7 +521,7 @@ func (b *Executor) Prepare(ib *imagebuilder.Builder, node *parser.Node, from str
|
||||
}
|
||||
return errors.Wrapf(err, "error updating build context")
|
||||
}
|
||||
mountPoint, err := builder.Mount("")
|
||||
mountPoint, err := builder.Mount(builder.MountLabel)
|
||||
if err != nil {
|
||||
if err2 := builder.Delete(); err2 != nil {
|
||||
logrus.Debugf("error deleting container which we failed to mount: %v", err2)
|
||||
@@ -546,6 +578,8 @@ func (b *Executor) Commit(ib *imagebuilder.Builder) (err error) {
|
||||
if err2 == nil {
|
||||
imageRef = imageRef2
|
||||
err = nil
|
||||
} else {
|
||||
err = err2
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -554,6 +588,9 @@ func (b *Executor) Commit(ib *imagebuilder.Builder) (err error) {
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing reference for image to be written")
|
||||
}
|
||||
if ib.Author != "" {
|
||||
b.builder.SetMaintainer(ib.Author)
|
||||
}
|
||||
config := ib.Config()
|
||||
b.builder.SetHostname(config.Hostname)
|
||||
b.builder.SetDomainname(config.Domainname)
|
||||
|
||||
@@ -9,10 +9,10 @@ import (
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/storage/pkg/chrootarchive"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func cloneToDirectory(url, dir string) error {
|
||||
|
||||
36
import.go
@@ -16,9 +16,9 @@ func importBuilderDataFromImage(store storage.Store, systemContext *types.System
|
||||
imageName := ""
|
||||
|
||||
if imageID != "" {
|
||||
ref, err := is.Transport.ParseStoreReference(store, "@"+imageID)
|
||||
ref, err := is.Transport.ParseStoreReference(store, imageID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "no such image %q", "@"+imageID)
|
||||
return nil, errors.Wrapf(err, "no such image %q", imageID)
|
||||
}
|
||||
src, err2 := ref.NewImage(systemContext)
|
||||
if err2 != nil {
|
||||
@@ -68,7 +68,7 @@ func importBuilder(store storage.Store, options ImportOptions) (*Builder, error)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
systemContext := getSystemContext(options.SignaturePolicyPath)
|
||||
systemContext := getSystemContext(&types.SystemContext{}, options.SignaturePolicyPath)
|
||||
|
||||
builder, err := importBuilderDataFromImage(store, systemContext, c.ImageID, options.Container, c.ID)
|
||||
if err != nil {
|
||||
@@ -95,21 +95,27 @@ func importBuilder(store storage.Store, options ImportOptions) (*Builder, error)
|
||||
}
|
||||
|
||||
func importBuilderFromImage(store storage.Store, options ImportFromImageOptions) (*Builder, error) {
|
||||
var img *storage.Image
|
||||
var err error
|
||||
|
||||
if options.Image == "" {
|
||||
return nil, errors.Errorf("image name must be specified")
|
||||
}
|
||||
|
||||
img, err := util.FindImage(store, options.Image)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error locating image %q for importing settings", options.Image)
|
||||
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
|
||||
|
||||
for _, image := range util.ResolveName(options.Image, "", systemContext, store) {
|
||||
img, err = util.FindImage(store, image)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
builder, err2 := importBuilderDataFromImage(store, systemContext, img.ID, "", "")
|
||||
if err2 != nil {
|
||||
return nil, errors.Wrapf(err2, "error importing build settings from image %q", options.Image)
|
||||
}
|
||||
|
||||
return builder, nil
|
||||
}
|
||||
|
||||
systemContext := getSystemContext(options.SignaturePolicyPath)
|
||||
|
||||
builder, err := importBuilderDataFromImage(store, systemContext, img.ID, "", "")
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error importing build settings from image %q", options.Image)
|
||||
}
|
||||
|
||||
return builder, nil
|
||||
return nil, errors.Wrapf(err, "error locating image %q for importing settings", options.Image)
|
||||
}
|
||||
|
||||
142
install.md
Normal file
@@ -0,0 +1,142 @@
|
||||
# Installation Instructions
|
||||
|
||||
## System Requirements
|
||||
|
||||
### Kernel Version Requirements
|
||||
To run Buildah on Red Hat Enterprise Linux or CentOS, version 7.4 or higher is required.
|
||||
On other Linux distributions Buildah requires a kernel version of 4.0 or
|
||||
higher in order to support the OverlayFS filesystem. The kernel version can be checked
|
||||
with the 'uname -a' command.
|
||||
|
||||
### runc Requirement
|
||||
|
||||
Buildah uses `runc` to run commands when `buildah run` is used, or when `buildah build-using-dockerfile`
|
||||
encounters a `RUN` instruction, so you'll also need to build and install a compatible version of
|
||||
[runc](https://github.com/opencontainers/runc) for Buildah to call for those cases. If Buildah is installed
|
||||
via a package manager such as yum, dnf or apt-get, runc will be installed as part of that process.
|
||||
|
||||
## Package Installation
|
||||
|
||||
Buildah is available on several software repositories and can be installed via a package manager such
|
||||
as yum, dnf or apt-get on a number of Linux distributions.
|
||||
|
||||
## Installation from GitHub
|
||||
|
||||
Prior to installing Buildah, install the following packages on your Linux distro:
|
||||
* make
|
||||
* golang (Requires version 1.8.1 or higher.)
|
||||
* bats
|
||||
* btrfs-progs-devel
|
||||
* bzip2
|
||||
* device-mapper-devel
|
||||
* git
|
||||
* go-md2man
|
||||
* gpgme-devel
|
||||
* glib2-devel
|
||||
* libassuan-devel
|
||||
* ostree-devel
|
||||
* runc (Requires version 1.0 RC4 or higher.)
|
||||
* skopeo-containers
|
||||
|
||||
### Fedora
|
||||
|
||||
In Fedora, you can use this command:
|
||||
|
||||
```
|
||||
dnf -y install \
|
||||
make \
|
||||
golang \
|
||||
bats \
|
||||
btrfs-progs-devel \
|
||||
device-mapper-devel \
|
||||
glib2-devel \
|
||||
gpgme-devel \
|
||||
libassuan-devel \
|
||||
ostree-devel \
|
||||
git \
|
||||
bzip2 \
|
||||
go-md2man \
|
||||
runc \
|
||||
skopeo-containers
|
||||
```
|
||||
|
||||
Then to install Buildah on Fedora follow the steps in this example:
|
||||
|
||||
|
||||
```
|
||||
mkdir ~/buildah
|
||||
cd ~/buildah
|
||||
export GOPATH=`pwd`
|
||||
git clone https://github.com/projectatomic/buildah ./src/github.com/projectatomic/buildah
|
||||
cd ./src/github.com/projectatomic/buildah
|
||||
make
|
||||
sudo make install
|
||||
buildah --help
|
||||
```
|
||||
|
||||
### RHEL, CentOS
|
||||
|
||||
In RHEL and CentOS 7, ensure that you are subscribed to `rhel-7-server-rpms`,
|
||||
`rhel-7-server-extras-rpms`, and `rhel-7-server-optional-rpms`, then
|
||||
run this command:
|
||||
|
||||
```
|
||||
yum -y install \
|
||||
make \
|
||||
golang \
|
||||
bats \
|
||||
btrfs-progs-devel \
|
||||
device-mapper-devel \
|
||||
glib2-devel \
|
||||
gpgme-devel \
|
||||
libassuan-devel \
|
||||
ostree-devel \
|
||||
git \
|
||||
bzip2 \
|
||||
go-md2man \
|
||||
runc \
|
||||
skopeo-containers
|
||||
```
|
||||
|
||||
The build steps for Buildah on RHEL or CentOS are the same as Fedora, above.
|
||||
|
||||
### Ubuntu
|
||||
|
||||
In Ubuntu zesty and xenial, you can use these commands:
|
||||
|
||||
```
|
||||
apt-get -y install software-properties-common
|
||||
add-apt-repository -y ppa:alexlarsson/flatpak
|
||||
add-apt-repository -y ppa:gophers/archive
|
||||
apt-add-repository -y ppa:projectatomic/ppa
|
||||
apt-get -y -qq update
|
||||
apt-get -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libostree-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
|
||||
apt-get -y install golang-1.8
|
||||
```
|
||||
Then to install Buildah on Ubuntu follow the steps in this example:
|
||||
|
||||
```
|
||||
mkdir ~/buildah
|
||||
cd ~/buildah
|
||||
export GOPATH=`pwd`
|
||||
git clone https://github.com/projectatomic/buildah ./src/github.com/projectatomic/buildah
|
||||
cd ./src/github.com/projectatomic/buildah
|
||||
PATH=/usr/lib/go-1.8/bin:$PATH make runc all TAGS="apparmor seccomp"
|
||||
sudo make install install.runc
|
||||
buildah --help
|
||||
```
|
||||
|
||||
### Debian
|
||||
|
||||
To install the required dependencies, you can use those commands, tested under Debian GNU/Linux amd64 9.3 (stretch):
|
||||
|
||||
```
|
||||
gpg --recv-keys 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D
|
||||
gpg --export 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D >> /usr/share/keyrings/projectatomic-ppa.gpg
|
||||
echo 'deb [signed-by=/usr/share/keyrings/projectatomic-ppa.gpg] http://ppa.launchpad.net/projectatomic/ppa/ubuntu zesty main' > /etc/apt/sources.list.d/projectatomic-ppa.list
|
||||
apt update
|
||||
apt -y install -t stretch-backports libostree-dev golang
|
||||
apt -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
|
||||
```
|
||||
|
||||
The build steps on Debian are otherwise the same as Ubuntu, above.
|
||||
2888
logos/buildah-logo-source.svg
Normal file
|
After Width: | Height: | Size: 170 KiB |
BIN
logos/buildah-logo_large.png
Normal file
|
After Width: | Height: | Size: 33 KiB |
BIN
logos/buildah-logo_large_transparent-bg.png
Normal file
|
After Width: | Height: | Size: 30 KiB |
BIN
logos/buildah-logo_medium.png
Normal file
|
After Width: | Height: | Size: 13 KiB |
BIN
logos/buildah-logo_medium_transparent-bg.png
Normal file
|
After Width: | Height: | Size: 12 KiB |
BIN
logos/buildah-logo_reverse_large.png
Normal file
|
After Width: | Height: | Size: 32 KiB |
BIN
logos/buildah-logo_reverse_medium.png
Normal file
|
After Width: | Height: | Size: 13 KiB |
BIN
logos/buildah-logo_reverse_small.png
Normal file
|
After Width: | Height: | Size: 7.8 KiB |
BIN
logos/buildah-logo_small.png
Normal file
|
After Width: | Height: | Size: 7.7 KiB |
BIN
logos/buildah-logo_small_transparent-bg.png
Normal file
|
After Width: | Height: | Size: 7.0 KiB |
BIN
logos/buildah-logomark_large.png
Normal file
|
After Width: | Height: | Size: 21 KiB |
BIN
logos/buildah-logomark_large_transparent-bg.png
Normal file
|
After Width: | Height: | Size: 19 KiB |
BIN
logos/buildah-logomark_medium.png
Normal file
|
After Width: | Height: | Size: 11 KiB |
BIN
logos/buildah-logomark_medium_transparent-bg.png
Normal file
|
After Width: | Height: | Size: 9.8 KiB |
BIN
logos/buildah-logomark_small.png
Normal file
|
After Width: | Height: | Size: 4.6 KiB |
BIN
logos/buildah-logomark_small_transparent-bg.png
Normal file
|
After Width: | Height: | Size: 4.1 KiB |
324
new.go
@@ -2,58 +2,249 @@ package buildah
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/openshift/imagebuilder"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
// BaseImageFakeName is the "name" of a source image which we interpret
|
||||
// as "no image".
|
||||
BaseImageFakeName = imagebuilder.NoBaseImageSpecifier
|
||||
|
||||
// DefaultTransport is a prefix that we apply to an image name if we
|
||||
// can't find one in the local Store, in order to generate a source
|
||||
// reference for the image that we can then copy to the local Store.
|
||||
DefaultTransport = "docker://"
|
||||
|
||||
// minimumTruncatedIDLength is the minimum length of an identifier that
|
||||
// we'll accept as possibly being a truncated image ID.
|
||||
minimumTruncatedIDLength = 3
|
||||
)
|
||||
|
||||
func newBuilder(store storage.Store, options BuilderOptions) (*Builder, error) {
|
||||
var img *storage.Image
|
||||
manifest := []byte{}
|
||||
config := []byte{}
|
||||
func reserveSELinuxLabels(store storage.Store, id string) error {
|
||||
if selinux.GetEnabled() {
|
||||
containers, err := store.Containers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, c := range containers {
|
||||
if id == c.ID {
|
||||
continue
|
||||
} else {
|
||||
b, err := OpenBuilder(store, c.ID)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// Ignore not exist errors since containers probably created by other tool
|
||||
// TODO, we need to read other containers json data to reserve their SELinux labels
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
// Prevent containers from using same MCS Label
|
||||
if err := label.ReserveLabel(b.ProcessLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func pullAndFindImage(store storage.Store, imageName string, options BuilderOptions, sc *types.SystemContext) (*storage.Image, types.ImageReference, error) {
|
||||
ref, err := pullImage(store, imageName, options, sc)
|
||||
if err != nil {
|
||||
logrus.Debugf("error pulling image %q: %v", imageName, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
img, err := is.Transport.GetStoreImage(store, ref)
|
||||
if err != nil {
|
||||
logrus.Debugf("error reading pulled image %q: %v", imageName, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
return img, ref, nil
|
||||
}
|
||||
|
||||
func getImageName(name string, img *storage.Image) string {
|
||||
imageName := name
|
||||
if len(img.Names) > 0 {
|
||||
imageName = img.Names[0]
|
||||
// When the image used by the container is a tagged image
|
||||
// the container name might be set to the original image instead of
|
||||
// the image given in the "form" command line.
|
||||
// This loop is supposed to fix this.
|
||||
for _, n := range img.Names {
|
||||
if strings.Contains(n, name) {
|
||||
imageName = n
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return imageName
|
||||
}
|
||||
|
||||
func imageNamePrefix(imageName string) string {
|
||||
prefix := imageName
|
||||
s := strings.Split(imageName, "/")
|
||||
if len(s) > 0 {
|
||||
prefix = s[len(s)-1]
|
||||
}
|
||||
s = strings.Split(prefix, ":")
|
||||
if len(s) > 0 {
|
||||
prefix = s[0]
|
||||
}
|
||||
s = strings.Split(prefix, "@")
|
||||
if len(s) > 0 {
|
||||
prefix = s[0]
|
||||
}
|
||||
return prefix
|
||||
}
|
||||
|
||||
func imageManifestAndConfig(ref types.ImageReference, systemContext *types.SystemContext) (manifest, config []byte, err error) {
|
||||
if ref != nil {
|
||||
src, err := ref.NewImage(systemContext)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error instantiating image for %q", transports.ImageName(ref))
|
||||
}
|
||||
defer src.Close()
|
||||
config, err := src.ConfigBlob()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error reading image configuration for %q", transports.ImageName(ref))
|
||||
}
|
||||
manifest, _, err := src.Manifest()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error reading image manifest for %q", transports.ImageName(ref))
|
||||
}
|
||||
return manifest, config, nil
|
||||
}
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func newBuilder(store storage.Store, options BuilderOptions) (*Builder, error) {
|
||||
var ref types.ImageReference
|
||||
var img *storage.Image
|
||||
var err error
|
||||
var manifest []byte
|
||||
var config []byte
|
||||
|
||||
name := "working-container"
|
||||
if options.FromImage == BaseImageFakeName {
|
||||
options.FromImage = ""
|
||||
}
|
||||
if options.Transport == "" {
|
||||
options.Transport = DefaultTransport
|
||||
}
|
||||
|
||||
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
|
||||
|
||||
for _, image := range util.ResolveName(options.FromImage, options.Registry, systemContext, store) {
|
||||
if len(image) >= minimumTruncatedIDLength {
|
||||
if img, err = store.Image(image); err == nil && img != nil && strings.HasPrefix(img.ID, image) {
|
||||
if ref, err = is.Transport.ParseStoreReference(store, img.ID); err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if options.PullPolicy == PullAlways {
|
||||
pulledImg, pulledReference, err2 := pullAndFindImage(store, image, options, systemContext)
|
||||
if err2 != nil {
|
||||
logrus.Debugf("error pulling and reading image %q: %v", image, err2)
|
||||
err = err2
|
||||
continue
|
||||
}
|
||||
ref = pulledReference
|
||||
img = pulledImg
|
||||
break
|
||||
}
|
||||
|
||||
srcRef, err2 := alltransports.ParseImageName(image)
|
||||
if err2 != nil {
|
||||
if options.Transport == "" {
|
||||
logrus.Debugf("error parsing image name %q: %v", image, err2)
|
||||
err = err2
|
||||
continue
|
||||
}
|
||||
srcRef2, err3 := alltransports.ParseImageName(options.Transport + image)
|
||||
if err3 != nil {
|
||||
logrus.Debugf("error parsing image name %q: %v", image, err2)
|
||||
err = err3
|
||||
continue
|
||||
}
|
||||
srcRef = srcRef2
|
||||
}
|
||||
|
||||
destImage, err2 := localImageNameForReference(store, srcRef)
|
||||
if err2 != nil {
|
||||
return nil, errors.Wrapf(err2, "error computing local image name for %q", transports.ImageName(srcRef))
|
||||
}
|
||||
if destImage == "" {
|
||||
return nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef))
|
||||
}
|
||||
|
||||
ref, err = is.Transport.ParseStoreReference(store, destImage)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing reference to image %q", destImage)
|
||||
}
|
||||
img, err = is.Transport.GetStoreImage(store, ref)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == storage.ErrImageUnknown && options.PullPolicy != PullIfMissing {
|
||||
logrus.Debugf("no such image %q: %v", transports.ImageName(ref), err)
|
||||
continue
|
||||
}
|
||||
pulledImg, pulledReference, err2 := pullAndFindImage(store, image, options, systemContext)
|
||||
if err2 != nil {
|
||||
logrus.Debugf("error pulling and reading image %q: %v", image, err2)
|
||||
err = err2
|
||||
continue
|
||||
}
|
||||
ref = pulledReference
|
||||
img = pulledImg
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if options.FromImage != "" && (ref == nil || img == nil) {
|
||||
// If options.FromImage is set but we ended up
|
||||
// with nil in ref or in img then there was an error that
|
||||
// we should return.
|
||||
return nil, util.GetFailureCause(err, errors.Wrapf(storage.ErrImageUnknown, "no such image %q", options.FromImage))
|
||||
}
|
||||
image := options.FromImage
|
||||
imageID := ""
|
||||
if img != nil {
|
||||
image = getImageName(imageNamePrefix(image), img)
|
||||
imageID = img.ID
|
||||
}
|
||||
if manifest, config, err = imageManifestAndConfig(ref, systemContext); err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading data from image %q", transports.ImageName(ref))
|
||||
}
|
||||
|
||||
name := "working-container"
|
||||
if options.Container != "" {
|
||||
name = options.Container
|
||||
} else {
|
||||
var err2 error
|
||||
if image != "" {
|
||||
prefix := image
|
||||
s := strings.Split(prefix, "/")
|
||||
if len(s) > 0 {
|
||||
prefix = s[len(s)-1]
|
||||
}
|
||||
s = strings.Split(prefix, ":")
|
||||
if len(s) > 0 {
|
||||
prefix = s[0]
|
||||
}
|
||||
s = strings.Split(prefix, "@")
|
||||
if len(s) > 0 {
|
||||
prefix = s[0]
|
||||
}
|
||||
name = prefix + "-" + name
|
||||
name = imageNamePrefix(image) + "-" + name
|
||||
}
|
||||
}
|
||||
if name != "" {
|
||||
var err error
|
||||
suffix := 1
|
||||
tmpName := name
|
||||
for err != storage.ErrContainerUnknown {
|
||||
_, err = store.Container(tmpName)
|
||||
if err == nil {
|
||||
for errors.Cause(err2) != storage.ErrContainerUnknown {
|
||||
_, err2 = store.Container(tmpName)
|
||||
if err2 == nil {
|
||||
suffix++
|
||||
tmpName = fmt.Sprintf("%s-%d", name, suffix)
|
||||
}
|
||||
@@ -61,54 +252,6 @@ func newBuilder(store storage.Store, options BuilderOptions) (*Builder, error) {
|
||||
name = tmpName
|
||||
}
|
||||
|
||||
systemContext := getSystemContext(options.SignaturePolicyPath)
|
||||
|
||||
imageID := ""
|
||||
if image != "" {
|
||||
if options.PullPolicy == PullAlways {
|
||||
err := pullImage(store, options, systemContext)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error pulling image %q", image)
|
||||
}
|
||||
}
|
||||
ref, err := is.Transport.ParseStoreReference(store, image)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing reference to image %q", image)
|
||||
}
|
||||
img, err = is.Transport.GetStoreImage(store, ref)
|
||||
if err != nil {
|
||||
if err == storage.ErrImageUnknown && options.PullPolicy != PullIfMissing {
|
||||
return nil, errors.Wrapf(err, "no such image %q", image)
|
||||
}
|
||||
err = pullImage(store, options, systemContext)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error pulling image %q", image)
|
||||
}
|
||||
ref, err = is.Transport.ParseStoreReference(store, image)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing reference to image %q", image)
|
||||
}
|
||||
img, err = is.Transport.GetStoreImage(store, ref)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "no such image %q", image)
|
||||
}
|
||||
imageID = img.ID
|
||||
src, err := ref.NewImage(systemContext)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error instantiating image")
|
||||
}
|
||||
defer src.Close()
|
||||
config, err = src.ConfigBlob()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading image configuration")
|
||||
}
|
||||
manifest, _, err = src.Manifest()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading image manifest")
|
||||
}
|
||||
}
|
||||
|
||||
coptions := storage.ContainerOptions{}
|
||||
container, err := store.CreateContainer("", []string{name}, imageID, "", "", &coptions)
|
||||
if err != nil {
|
||||
@@ -123,21 +266,32 @@ func newBuilder(store storage.Store, options BuilderOptions) (*Builder, error) {
|
||||
}
|
||||
}()
|
||||
|
||||
if err = reserveSELinuxLabels(store, container.ID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
processLabel, mountLabel, err := label.InitLabels(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
builder := &Builder{
|
||||
store: store,
|
||||
Type: containerType,
|
||||
FromImage: image,
|
||||
FromImageID: imageID,
|
||||
Config: config,
|
||||
Manifest: manifest,
|
||||
Container: name,
|
||||
ContainerID: container.ID,
|
||||
ImageAnnotations: map[string]string{},
|
||||
ImageCreatedBy: "",
|
||||
store: store,
|
||||
Type: containerType,
|
||||
FromImage: image,
|
||||
FromImageID: imageID,
|
||||
Config: config,
|
||||
Manifest: manifest,
|
||||
Container: name,
|
||||
ContainerID: container.ID,
|
||||
ImageAnnotations: map[string]string{},
|
||||
ImageCreatedBy: "",
|
||||
ProcessLabel: processLabel,
|
||||
MountLabel: mountLabel,
|
||||
DefaultMountsFilePath: options.DefaultMountsFilePath,
|
||||
}
|
||||
|
||||
if options.Mount {
|
||||
_, err = builder.Mount("")
|
||||
_, err = builder.Mount(mountLabel)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error mounting build container")
|
||||
}
|
||||
|
||||
28
new_test.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package buildah
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/containers/storage"
|
||||
)
|
||||
|
||||
func TestGetImageName(t *testing.T) {
|
||||
tt := []struct {
|
||||
caseName string
|
||||
name string
|
||||
names []string
|
||||
expected string
|
||||
}{
|
||||
{"tagged image", "busybox1", []string{"docker.io/library/busybox:latest", "docker.io/library/busybox1:latest"}, "docker.io/library/busybox1:latest"},
|
||||
{"image name not in the resolved image names", "image1", []string{"docker.io/library/busybox:latest", "docker.io/library/busybox1:latest"}, "docker.io/library/busybox:latest"},
|
||||
{"resolved image with empty name list", "image1", []string{}, "image1"},
|
||||
}
|
||||
|
||||
for _, tc := range tt {
|
||||
img := &storage.Image{Names: tc.names}
|
||||
res := getImageName(tc.name, img)
|
||||
if res != tc.expected {
|
||||
t.Errorf("test case '%s' failed: expected %#v but got %#v", tc.caseName, tc.expected, res)
|
||||
}
|
||||
}
|
||||
}
|
||||
4
ostree_tag.sh
Executable file
@@ -0,0 +1,4 @@
|
||||
#!/bin/bash
|
||||
if ! pkg-config ostree-1 2> /dev/null ; then
|
||||
echo containers_image_ostree_stub
|
||||
fi
|
||||
97
pull.go
@@ -1,58 +1,113 @@
|
||||
package buildah
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
"strings"
|
||||
|
||||
cp "github.com/containers/image/copy"
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/signature"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func pullImage(store storage.Store, options BuilderOptions, sc *types.SystemContext) error {
|
||||
name := options.FromImage
|
||||
|
||||
spec := name
|
||||
if options.Registry != "" {
|
||||
spec = options.Registry + spec
|
||||
func localImageNameForReference(store storage.Store, srcRef types.ImageReference) (string, error) {
|
||||
if srcRef == nil {
|
||||
return "", errors.Errorf("reference to image is empty")
|
||||
}
|
||||
ref := srcRef.DockerReference()
|
||||
if ref == nil {
|
||||
name := srcRef.StringWithinTransport()
|
||||
_, err := is.Transport.ParseStoreReference(store, name)
|
||||
if err == nil {
|
||||
return name, nil
|
||||
}
|
||||
if strings.LastIndex(name, "/") != -1 {
|
||||
name = name[strings.LastIndex(name, "/")+1:]
|
||||
_, err = is.Transport.ParseStoreReference(store, name)
|
||||
if err == nil {
|
||||
return name, nil
|
||||
}
|
||||
}
|
||||
return "", errors.Errorf("reference to image %q is not a named reference", transports.ImageName(srcRef))
|
||||
}
|
||||
|
||||
srcRef, err := alltransports.ParseImageName(name)
|
||||
name := ""
|
||||
if named, ok := ref.(reference.Named); ok {
|
||||
name = named.Name()
|
||||
if namedTagged, ok := ref.(reference.NamedTagged); ok {
|
||||
name = name + ":" + namedTagged.Tag()
|
||||
}
|
||||
if canonical, ok := ref.(reference.Canonical); ok {
|
||||
name = name + "@" + canonical.Digest().String()
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := is.Transport.ParseStoreReference(store, name); err != nil {
|
||||
return "", errors.Wrapf(err, "error parsing computed local image name %q", name)
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
func pullImage(store storage.Store, imageName string, options BuilderOptions, sc *types.SystemContext) (types.ImageReference, error) {
|
||||
spec := imageName
|
||||
srcRef, err := alltransports.ParseImageName(spec)
|
||||
if err != nil {
|
||||
if options.Transport == "" {
|
||||
return nil, errors.Wrapf(err, "error parsing image name %q", spec)
|
||||
}
|
||||
spec = options.Transport + spec
|
||||
srcRef2, err2 := alltransports.ParseImageName(spec)
|
||||
if err2 != nil {
|
||||
return errors.Wrapf(err2, "error parsing image name %q", spec)
|
||||
return nil, errors.Wrapf(err2, "error parsing image name %q", spec)
|
||||
}
|
||||
srcRef = srcRef2
|
||||
}
|
||||
|
||||
if ref := srcRef.DockerReference(); ref != nil {
|
||||
name = srcRef.DockerReference().Name()
|
||||
if tagged, ok := srcRef.DockerReference().(reference.NamedTagged); ok {
|
||||
name = name + ":" + tagged.Tag()
|
||||
}
|
||||
destName, err := localImageNameForReference(store, srcRef)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error computing local image name for %q", transports.ImageName(srcRef))
|
||||
}
|
||||
if destName == "" {
|
||||
return nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef))
|
||||
}
|
||||
|
||||
destRef, err := is.Transport.ParseStoreReference(store, name)
|
||||
destRef, err := is.Transport.ParseStoreReference(store, destName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing full image name %q", name)
|
||||
return nil, errors.Wrapf(err, "error parsing image name %q", destName)
|
||||
}
|
||||
|
||||
img, err := srcRef.NewImageSource(sc)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error initializing %q as an image source", spec)
|
||||
}
|
||||
img.Close()
|
||||
|
||||
policy, err := signature.DefaultPolicy(sc)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, errors.Wrapf(err, "error obtaining default signature policy")
|
||||
}
|
||||
|
||||
policyContext, err := signature.NewPolicyContext(policy)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, errors.Wrapf(err, "error creating new signature policy context")
|
||||
}
|
||||
|
||||
logrus.Debugf("copying %q to %q", spec, name)
|
||||
defer func() {
|
||||
if err2 := policyContext.Destroy(); err2 != nil {
|
||||
logrus.Debugf("error destroying signature policy context: %v", err2)
|
||||
}
|
||||
}()
|
||||
|
||||
err = cp.Image(policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter))
|
||||
return err
|
||||
logrus.Debugf("copying %q to %q", spec, destName)
|
||||
|
||||
err = cp.Image(policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter, options.SystemContext, nil, ""))
|
||||
if err == nil {
|
||||
return destRef, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
129
run.go
@@ -8,11 +8,14 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/storage/pkg/ioutils"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/runtime-tools/generate"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -62,9 +65,11 @@ type RunOptions struct {
|
||||
// decision can be overridden by specifying either WithTerminal or
|
||||
// WithoutTerminal.
|
||||
Terminal int
|
||||
// Quiet tells the run to turn off output to stdout.
|
||||
Quiet bool
|
||||
}
|
||||
|
||||
func setupMounts(spec *specs.Spec, optionMounts []specs.Mount, bindFiles, volumes []string) error {
|
||||
func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts []specs.Mount, bindFiles, volumes []string) error {
|
||||
// The passed-in mounts matter the most to us.
|
||||
mounts := make([]specs.Mount, len(optionMounts))
|
||||
copy(mounts, optionMounts)
|
||||
@@ -98,17 +103,56 @@ func setupMounts(spec *specs.Spec, optionMounts []specs.Mount, bindFiles, volume
|
||||
Options: []string{"rbind", "ro"},
|
||||
})
|
||||
}
|
||||
// Add tmpfs filesystems at volume locations, unless we already have something there.
|
||||
for _, volume := range volumes {
|
||||
if haveMount(volume) {
|
||||
// Already mounting something there, no need for a tmpfs.
|
||||
|
||||
cdir, err := b.store.ContainerDirectory(b.ContainerID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID)
|
||||
}
|
||||
|
||||
// Add secrets mounts
|
||||
mountsFiles := []string{OverrideMountsFile, b.DefaultMountsFilePath}
|
||||
for _, file := range mountsFiles {
|
||||
secretMounts, err := secretMounts(file, b.MountLabel, cdir)
|
||||
if err != nil {
|
||||
logrus.Warn("error mounting secrets, skipping...")
|
||||
continue
|
||||
}
|
||||
// Mount a tmpfs there.
|
||||
for _, mount := range secretMounts {
|
||||
if haveMount(mount.Destination) {
|
||||
continue
|
||||
}
|
||||
mounts = append(mounts, mount)
|
||||
}
|
||||
}
|
||||
// Add temporary copies of the contents of volume locations at the
|
||||
// volume locations, unless we already have something there.
|
||||
for _, volume := range volumes {
|
||||
if haveMount(volume) {
|
||||
// Already mounting something there, no need to bother.
|
||||
continue
|
||||
}
|
||||
subdir := digest.Canonical.FromString(volume).Hex()
|
||||
volumePath := filepath.Join(cdir, "buildah-volumes", subdir)
|
||||
// If we need to, initialize the volume path's initial contents.
|
||||
if _, err = os.Stat(volumePath); os.IsNotExist(err) {
|
||||
if err = os.MkdirAll(volumePath, 0755); err != nil {
|
||||
return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, volume, b.ContainerID)
|
||||
}
|
||||
if err = label.Relabel(volumePath, b.MountLabel, false); err != nil {
|
||||
return errors.Wrapf(err, "error relabeling directory %q for volume %q in container %q", volumePath, volume, b.ContainerID)
|
||||
}
|
||||
srcPath := filepath.Join(mountPoint, volume)
|
||||
if err = copyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
|
||||
return errors.Wrapf(err, "error populating directory %q for volume %q in container %q using contents of %q", volumePath, volume, b.ContainerID, srcPath)
|
||||
}
|
||||
|
||||
}
|
||||
// Add the bind mount.
|
||||
mounts = append(mounts, specs.Mount{
|
||||
Source: "tmpfs",
|
||||
Source: volumePath,
|
||||
Destination: volume,
|
||||
Type: "tmpfs",
|
||||
Type: "bind",
|
||||
Options: []string{"bind"},
|
||||
})
|
||||
}
|
||||
// Set the list in the spec.
|
||||
@@ -131,12 +175,6 @@ func (b *Builder) Run(command []string, options RunOptions) error {
|
||||
}()
|
||||
g := generate.New()
|
||||
|
||||
if b.OS() != "" {
|
||||
g.SetPlatformOS(b.OS())
|
||||
}
|
||||
if b.Architecture() != "" {
|
||||
g.SetPlatformArch(b.Architecture())
|
||||
}
|
||||
for _, envSpec := range append(b.Env(), options.Env...) {
|
||||
env := strings.SplitN(envSpec, "=", 2)
|
||||
if len(env) > 1 {
|
||||
@@ -145,14 +183,16 @@ func (b *Builder) Run(command []string, options RunOptions) error {
|
||||
}
|
||||
if len(command) > 0 {
|
||||
g.SetProcessArgs(command)
|
||||
} else if len(options.Cmd) != 0 {
|
||||
g.SetProcessArgs(options.Cmd)
|
||||
} else if len(b.Cmd()) != 0 {
|
||||
g.SetProcessArgs(b.Cmd())
|
||||
} else if len(options.Entrypoint) != 0 {
|
||||
g.SetProcessArgs(options.Entrypoint)
|
||||
} else if len(b.Entrypoint()) != 0 {
|
||||
g.SetProcessArgs(b.Entrypoint())
|
||||
} else {
|
||||
cmd := b.Cmd()
|
||||
if len(options.Cmd) > 0 {
|
||||
cmd = options.Cmd
|
||||
}
|
||||
ep := b.Entrypoint()
|
||||
if len(options.Entrypoint) > 0 {
|
||||
ep = options.Entrypoint
|
||||
}
|
||||
g.SetProcessArgs(append(ep, cmd...))
|
||||
}
|
||||
if options.WorkingDir != "" {
|
||||
g.SetProcessCwd(options.WorkingDir)
|
||||
@@ -164,7 +204,9 @@ func (b *Builder) Run(command []string, options RunOptions) error {
|
||||
} else if b.Hostname() != "" {
|
||||
g.SetHostname(b.Hostname())
|
||||
}
|
||||
mountPoint, err := b.Mount("")
|
||||
g.SetProcessSelinuxLabel(b.ProcessLabel)
|
||||
g.SetLinuxMountLabel(b.MountLabel)
|
||||
mountPoint, err := b.Mount(b.MountLabel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -173,10 +215,32 @@ func (b *Builder) Run(command []string, options RunOptions) error {
|
||||
logrus.Errorf("error unmounting container: %v", err2)
|
||||
}
|
||||
}()
|
||||
for _, mp := range []string{
|
||||
"/proc/kcore",
|
||||
"/proc/latency_stats",
|
||||
"/proc/timer_list",
|
||||
"/proc/timer_stats",
|
||||
"/proc/sched_debug",
|
||||
"/proc/scsi",
|
||||
"/sys/firmware",
|
||||
} {
|
||||
g.AddLinuxMaskedPaths(mp)
|
||||
}
|
||||
|
||||
for _, rp := range []string{
|
||||
"/proc/asound",
|
||||
"/proc/bus",
|
||||
"/proc/fs",
|
||||
"/proc/irq",
|
||||
"/proc/sys",
|
||||
"/proc/sysrq-trigger",
|
||||
} {
|
||||
g.AddLinuxReadonlyPaths(rp)
|
||||
}
|
||||
g.SetRootPath(mountPoint)
|
||||
switch options.Terminal {
|
||||
case DefaultTerminal:
|
||||
g.SetProcessTerminal(logrus.IsTerminal(os.Stdout))
|
||||
g.SetProcessTerminal(terminal.IsTerminal(int(os.Stdout.Fd())))
|
||||
case WithTerminal:
|
||||
g.SetProcessTerminal(true)
|
||||
case WithoutTerminal:
|
||||
@@ -187,11 +251,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
|
||||
return errors.Wrapf(err, "error removing network namespace for run")
|
||||
}
|
||||
}
|
||||
if options.User != "" {
|
||||
user, err = getUser(mountPoint, options.User)
|
||||
} else {
|
||||
user, err = getUser(mountPoint, b.User())
|
||||
}
|
||||
user, err = b.user(mountPoint, options.User)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -201,12 +261,12 @@ func (b *Builder) Run(command []string, options RunOptions) error {
|
||||
if spec.Process.Cwd == "" {
|
||||
spec.Process.Cwd = DefaultWorkingDir
|
||||
}
|
||||
if err = os.MkdirAll(filepath.Join(mountPoint, b.WorkDir()), 0755); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring working directory %q exists", b.WorkDir())
|
||||
if err = os.MkdirAll(filepath.Join(mountPoint, spec.Process.Cwd), 0755); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring working directory %q exists", spec.Process.Cwd)
|
||||
}
|
||||
|
||||
bindFiles := []string{"/etc/hosts", "/etc/resolv.conf"}
|
||||
err = setupMounts(spec, options.Mounts, bindFiles, b.Volumes())
|
||||
err = b.setupMounts(mountPoint, spec, options.Mounts, bindFiles, b.Volumes())
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error resolving mountpoints for container")
|
||||
}
|
||||
@@ -228,6 +288,9 @@ func (b *Builder) Run(command []string, options RunOptions) error {
|
||||
cmd.Dir = mountPoint
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
if options.Quiet {
|
||||
cmd.Stdout = nil
|
||||
}
|
||||
cmd.Stderr = os.Stderr
|
||||
err = cmd.Run()
|
||||
if err != nil {
|
||||
|
||||
198
secrets.go
Normal file
@@ -0,0 +1,198 @@
|
||||
package buildah
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultMountsFile holds the default mount paths in the form
|
||||
// "host_path:container_path"
|
||||
DefaultMountsFile = "/usr/share/containers/mounts.conf"
|
||||
// OverrideMountsFile holds the default mount paths in the form
|
||||
// "host_path:container_path" overriden by the user
|
||||
OverrideMountsFile = "/etc/containers/mounts.conf"
|
||||
)
|
||||
|
||||
// SecretData info
|
||||
type SecretData struct {
|
||||
Name string
|
||||
Data []byte
|
||||
}
|
||||
|
||||
func getMounts(filePath string) []string {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
logrus.Warnf("file %q not found, skipping...", filePath)
|
||||
return nil
|
||||
}
|
||||
defer file.Close()
|
||||
scanner := bufio.NewScanner(file)
|
||||
if err = scanner.Err(); err != nil {
|
||||
logrus.Warnf("error reading file %q, skipping...", filePath)
|
||||
return nil
|
||||
}
|
||||
var mounts []string
|
||||
for scanner.Scan() {
|
||||
mounts = append(mounts, scanner.Text())
|
||||
}
|
||||
return mounts
|
||||
}
|
||||
|
||||
// SaveTo saves secret data to given directory
|
||||
func (s SecretData) SaveTo(dir string) error {
|
||||
path := filepath.Join(dir, s.Name)
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
return ioutil.WriteFile(path, s.Data, 0700)
|
||||
}
|
||||
|
||||
func readAll(root, prefix string) ([]SecretData, error) {
|
||||
path := filepath.Join(root, prefix)
|
||||
|
||||
data := []SecretData{}
|
||||
|
||||
files, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
fileData, err := readFile(root, filepath.Join(prefix, f.Name()))
|
||||
if err != nil {
|
||||
// If the file did not exist, might be a dangling symlink
|
||||
// Ignore the error
|
||||
if os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
data = append(data, fileData...)
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func readFile(root, name string) ([]SecretData, error) {
|
||||
path := filepath.Join(root, name)
|
||||
|
||||
s, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if s.IsDir() {
|
||||
dirData, err2 := readAll(root, name)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
return dirData, nil
|
||||
}
|
||||
bytes, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []SecretData{{Name: name, Data: bytes}}, nil
|
||||
}
|
||||
|
||||
// getHostAndCtrDir separates the host:container paths
|
||||
func getMountsMap(path string) (string, string, error) {
|
||||
arr := strings.SplitN(path, ":", 2)
|
||||
if len(arr) == 2 {
|
||||
return arr[0], arr[1], nil
|
||||
}
|
||||
return "", "", errors.Errorf("unable to get host and container dir")
|
||||
}
|
||||
|
||||
func getHostSecretData(hostDir string) ([]SecretData, error) {
|
||||
var allSecrets []SecretData
|
||||
hostSecrets, err := readAll(hostDir, "")
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to read secrets from %q", hostDir)
|
||||
}
|
||||
return append(allSecrets, hostSecrets...), nil
|
||||
}
|
||||
|
||||
// secretMount copies the contents of host directory to container directory
|
||||
// and returns a list of mounts
|
||||
func secretMounts(filePath, mountLabel, containerWorkingDir string) ([]rspec.Mount, error) {
|
||||
var mounts []rspec.Mount
|
||||
defaultMountsPaths := getMounts(filePath)
|
||||
for _, path := range defaultMountsPaths {
|
||||
hostDir, ctrDir, err := getMountsMap(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// skip if the hostDir path doesn't exist
|
||||
if _, err = os.Stat(hostDir); os.IsNotExist(err) {
|
||||
logrus.Warnf("%q doesn't exist, skipping", hostDir)
|
||||
continue
|
||||
}
|
||||
|
||||
ctrDirOnHost := filepath.Join(containerWorkingDir, ctrDir)
|
||||
if err = os.RemoveAll(ctrDirOnHost); err != nil {
|
||||
return nil, fmt.Errorf("remove container directory failed: %v", err)
|
||||
}
|
||||
|
||||
if err = os.MkdirAll(ctrDirOnHost, 0755); err != nil {
|
||||
return nil, fmt.Errorf("making container directory failed: %v", err)
|
||||
}
|
||||
|
||||
hostDir, err = resolveSymbolicLink(hostDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, err := getHostSecretData(hostDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "getting host secret data failed")
|
||||
}
|
||||
for _, s := range data {
|
||||
err = s.SaveTo(ctrDirOnHost)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
err = label.Relabel(ctrDirOnHost, mountLabel, false)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error applying correct labels")
|
||||
}
|
||||
|
||||
m := rspec.Mount{
|
||||
Source: ctrDirOnHost,
|
||||
Destination: ctrDir,
|
||||
Type: "bind",
|
||||
Options: []string{"bind"},
|
||||
}
|
||||
|
||||
mounts = append(mounts, m)
|
||||
}
|
||||
return mounts, nil
|
||||
}
|
||||
|
||||
// resolveSymbolicLink resolves a possbile symlink path. If the path is a symlink, returns resolved
|
||||
// path; if not, returns the original path.
|
||||
func resolveSymbolicLink(path string) (string, error) {
|
||||
info, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if info.Mode()&os.ModeSymlink != os.ModeSymlink {
|
||||
return path, nil
|
||||
}
|
||||
return filepath.EvalSymlinks(path)
|
||||
}
|
||||
4
selinux_tag.sh
Executable file
@@ -0,0 +1,4 @@
|
||||
#!/bin/bash
|
||||
if pkg-config libselinux 2> /dev/null ; then
|
||||
echo selinux
|
||||
fi
|
||||
48
tests/authenticate.bats
Normal file
@@ -0,0 +1,48 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
@test "from-authenticate-cert-and-creds" {
|
||||
|
||||
buildah from --pull --name "alpine" --signature-policy ${TESTSDIR}/policy.json alpine
|
||||
run buildah push --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds testuser:testpassword alpine localhost:5000/my-alpine
|
||||
echo "$output"
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# This should fail
|
||||
run buildah push localhost:5000/my-alpine --signature-policy ${TESTSDIR}/policy.json --tls-verify=true
|
||||
[ "$status" -ne 0 ]
|
||||
|
||||
# This should fail
|
||||
run buildah from localhost:5000/my-alpine --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds baduser:badpassword
|
||||
[ "$status" -ne 0 ]
|
||||
|
||||
# This should work
|
||||
run buildah from localhost:5000/my-alpine --name "my-alpine" --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds testuser:testpassword
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# Create Dockerfile for bud tests
|
||||
FILE=./Dockerfile
|
||||
/bin/cat <<EOM >$FILE
|
||||
FROM localhost:5000/my-alpine
|
||||
EOM
|
||||
chmod +x $FILE
|
||||
|
||||
# Remove containers and images before bud tests
|
||||
buildah rm --all
|
||||
buildah rmi -f --all
|
||||
|
||||
# bud test bad password should fail
|
||||
run buildah bud -f ./Dockerfile --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds=testuser:badpassword
|
||||
[ "$status" -ne 0 ]
|
||||
|
||||
# bud test this should work
|
||||
run buildah bud -f ./Dockerfile --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds=testuser:testpassword
|
||||
echo $status
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# Clean up
|
||||
rm -f ./Dockerfile
|
||||
buildah rm -a
|
||||
buildah rmi -f --all
|
||||
}
|
||||
@@ -95,6 +95,12 @@ load helpers
|
||||
cmp ${TESTDIR}/other-randomfile $yetanothernewroot/other-randomfile
|
||||
buildah delete $yetanothernewcid
|
||||
|
||||
newcid=$(buildah from new-image)
|
||||
buildah commit --rm --signature-policy ${TESTSDIR}/policy.json $newcid containers-storage:remove-container-image
|
||||
run buildah mount $newcid
|
||||
[ "$status" -ne 0 ]
|
||||
|
||||
buildah rmi remove-container-image
|
||||
buildah rmi containers-storage:other-new-image
|
||||
buildah rmi another-new-image
|
||||
run buildah --debug=false images -q
|
||||
@@ -104,5 +110,6 @@ load helpers
|
||||
buildah rmi $id
|
||||
done
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" == "" ]
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -24,6 +25,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
|
||||
target=alpine-image
|
||||
@@ -37,6 +39,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -52,6 +55,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
|
||||
target=alpine-image
|
||||
@@ -65,6 +69,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -81,11 +86,14 @@ load helpers
|
||||
run test -s $root/vol/subvol/subvolfile
|
||||
[ "$status" -ne 0 ]
|
||||
test -s $root/vol/volfile
|
||||
test -s $root/vol/Dockerfile
|
||||
test -s $root/vol/Dockerfile2
|
||||
run test -s $root/vol/anothervolfile
|
||||
[ "$status" -ne 0 ]
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -98,6 +106,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -110,6 +119,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -122,6 +132,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -134,6 +145,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -153,6 +165,7 @@ load helpers
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -166,6 +179,7 @@ load helpers
|
||||
buildah --debug=false images -q
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@@ -175,13 +189,76 @@ load helpers
|
||||
target3=so-many-scratch-images
|
||||
buildah bud --signature-policy ${TESTSDIR}/policy.json -t ${target} -t ${target2} -t ${target3} ${TESTSDIR}/bud/from-scratch
|
||||
run buildah --debug=false images
|
||||
[ "$status" -eq 0 ]
|
||||
cid=$(buildah from ${target})
|
||||
buildah rm ${cid}
|
||||
cid=$(buildah from library/${target2})
|
||||
buildah rm ${cid}
|
||||
cid=$(buildah from ${target3}:latest)
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
buildah rmi -f $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@test "bud-volume-perms" {
|
||||
# This Dockerfile needs us to be able to handle a working RUN instruction.
|
||||
if ! which runc ; then
|
||||
skip
|
||||
fi
|
||||
target=volume-image
|
||||
buildah bud --signature-policy ${TESTSDIR}/policy.json -t ${target} ${TESTSDIR}/bud/volume-perms
|
||||
cid=$(buildah from ${target})
|
||||
root=$(buildah mount ${cid})
|
||||
run test -s $root/vol/subvol/subvolfile
|
||||
[ "$status" -ne 0 ]
|
||||
run stat -c %f $root/vol/subvol
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = 41ed ]
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@test "bud-from-glob" {
|
||||
target=alpine-image
|
||||
buildah bud --signature-policy ${TESTSDIR}/policy.json -t ${target} -f Dockerfile2.glob ${TESTSDIR}/bud/from-multiple-files
|
||||
cid=$(buildah from ${target})
|
||||
root=$(buildah mount ${cid})
|
||||
cmp $root/Dockerfile1.alpine ${TESTSDIR}/bud/from-multiple-files/Dockerfile1.alpine
|
||||
cmp $root/Dockerfile2.withfrom ${TESTSDIR}/bud/from-multiple-files/Dockerfile2.withfrom
|
||||
buildah rm ${cid}
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@test "bud-maintainer" {
|
||||
target=alpine-image
|
||||
buildah bud --signature-policy ${TESTSDIR}/policy.json -t ${target} ${TESTSDIR}/bud/maintainer
|
||||
run buildah --debug=false inspect --type=image --format '{{.Docker.Author}}' ${target}
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = kilroy ]
|
||||
run buildah --debug=false inspect --type=image --format '{{.OCIv1.Author}}' ${target}
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = kilroy ]
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@test "bud-unrecognized-instruction" {
|
||||
target=alpine-image
|
||||
run buildah bud --signature-policy ${TESTSDIR}/policy.json -t ${target} ${TESTSDIR}/bud/unrecognized
|
||||
[ "$status" -ne 0 ]
|
||||
[[ "$output" =~ BOGUS ]]
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
2
tests/bud/from-multiple-files/Dockerfile2.glob
Normal file
@@ -0,0 +1,2 @@
|
||||
FROM alpine
|
||||
COPY Dockerfile* /
|
||||
2
tests/bud/maintainer/Dockerfile
Normal file
@@ -0,0 +1,2 @@
|
||||
FROM alpine
|
||||
MAINTAINER kilroy
|
||||
@@ -14,3 +14,9 @@ VOLUME /vol/subvol
|
||||
RUN dd if=/dev/zero bs=512 count=1 of=/vol/anothervolfile
|
||||
# Which means that in the image we're about to commit, /vol/anothervolfile
|
||||
# shouldn't exist, either.
|
||||
|
||||
# ADD files which should persist.
|
||||
ADD Dockerfile /vol/Dockerfile
|
||||
RUN stat /vol/Dockerfile
|
||||
ADD Dockerfile /vol/Dockerfile2
|
||||
RUN stat /vol/Dockerfile2
|
||||
|
||||
2
tests/bud/unrecognized/Dockerfile
Normal file
@@ -0,0 +1,2 @@
|
||||
FROM alpine
|
||||
BOGUS nope-nope-nope
|
||||
6
tests/bud/volume-perms/Dockerfile
Normal file
@@ -0,0 +1,6 @@
|
||||
FROM alpine
|
||||
VOLUME /vol/subvol
|
||||
# At this point, the directory should exist, with default permissions 0755, the
|
||||
# contents below /vol/subvol should be frozen, and we shouldn't get an error
|
||||
# from trying to write to it because we it was created automatically.
|
||||
RUN dd if=/dev/zero bs=512 count=1 of=/vol/subvol/subvolfile
|
||||
124
tests/byid.bats
Normal file
@@ -0,0 +1,124 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
@test "from-by-id" {
|
||||
image=busybox
|
||||
|
||||
# Pull down the image, if we have to.
|
||||
cid=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json $image)
|
||||
[ $? -eq 0 ]
|
||||
[ $(wc -l <<< "$cid") -eq 1 ]
|
||||
buildah rm $cid
|
||||
|
||||
# Get the image's ID.
|
||||
run buildah --debug=false images -q $image
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
iid="$output"
|
||||
|
||||
# Use the image's ID to create a container.
|
||||
run buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json ${iid}
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
cid="$output"
|
||||
buildah rm $cid
|
||||
|
||||
# Use a truncated form of the image's ID to create a container.
|
||||
run buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json ${iid:0:6}
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
cid="$output"
|
||||
buildah rm $cid
|
||||
|
||||
buildah rmi $iid
|
||||
}
|
||||
|
||||
@test "inspect-by-id" {
|
||||
image=busybox
|
||||
|
||||
# Pull down the image, if we have to.
|
||||
cid=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json $image)
|
||||
[ $? -eq 0 ]
|
||||
[ $(wc -l <<< "$cid") -eq 1 ]
|
||||
buildah rm $cid
|
||||
|
||||
# Get the image's ID.
|
||||
run buildah --debug=false images -q $image
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
iid="$output"
|
||||
|
||||
# Use the image's ID to inspect it.
|
||||
run buildah --debug=false inspect --type=image ${iid}
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
|
||||
# Use a truncated copy of the image's ID to inspect it.
|
||||
run buildah --debug=false inspect --type=image ${iid:0:6}
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
|
||||
buildah rmi $iid
|
||||
}
|
||||
|
||||
@test "push-by-id" {
|
||||
for image in busybox kubernetes/pause ; do
|
||||
echo pulling/pushing image $image
|
||||
|
||||
TARGET=${TESTDIR}/subdir-$(basename $image)
|
||||
mkdir -p $TARGET $TARGET-truncated
|
||||
|
||||
# Pull down the image, if we have to.
|
||||
cid=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json $image)
|
||||
[ $? -eq 0 ]
|
||||
[ $(wc -l <<< "$cid") -eq 1 ]
|
||||
buildah rm $cid
|
||||
|
||||
# Get the image's ID.
|
||||
run buildah --debug=false images -q $IMAGE
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
iid="$output"
|
||||
|
||||
# Use the image's ID to push it.
|
||||
run buildah push --signature-policy ${TESTSDIR}/policy.json $iid dir:$TARGET
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
|
||||
# Use a truncated form of the image's ID to push it.
|
||||
run buildah push --signature-policy ${TESTSDIR}/policy.json ${iid:0:6} dir:$TARGET-truncated
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
|
||||
# Use the image's complete ID to remove it.
|
||||
buildah rmi $iid
|
||||
done
|
||||
}
|
||||
|
||||
@test "rmi-by-id" {
|
||||
image=busybox
|
||||
|
||||
# Pull down the image, if we have to.
|
||||
cid=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json $image)
|
||||
[ $? -eq 0 ]
|
||||
[ $(wc -l <<< "$cid") -eq 1 ]
|
||||
buildah rm $cid
|
||||
|
||||
# Get the image's ID.
|
||||
run buildah --debug=false images -q $image
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
iid="$output"
|
||||
|
||||
# Use a truncated copy of the image's ID to remove it.
|
||||
run buildah --debug=false rmi ${iid:0:6}
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
}
|
||||
@@ -21,6 +21,13 @@ load helpers
|
||||
buildah copy $cid ${TESTDIR}/randomfile
|
||||
buildah copy $cid ${TESTDIR}/other-randomfile ${TESTDIR}/third-randomfile ${TESTDIR}/randomfile /etc
|
||||
buildah rm $cid
|
||||
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json alpine)
|
||||
root=$(buildah mount $cid)
|
||||
buildah config --workingdir / $cid
|
||||
buildah copy $cid "${TESTDIR}/*randomfile" /etc
|
||||
(cd ${TESTDIR}; for i in *randomfile; do cmp $i ${root}/etc/$i; done)
|
||||
buildah rm $cid
|
||||
}
|
||||
|
||||
@test "copy-local-plain" {
|
||||
@@ -104,3 +111,29 @@ load helpers
|
||||
[ "$status" -ne 0 ]
|
||||
buildah rm $cid
|
||||
}
|
||||
|
||||
@test "copy --chown" {
|
||||
mkdir -p ${TESTDIR}/subdir
|
||||
mkdir -p ${TESTDIR}/other-subdir
|
||||
createrandom ${TESTDIR}/subdir/randomfile
|
||||
createrandom ${TESTDIR}/subdir/other-randomfile
|
||||
createrandom ${TESTDIR}/randomfile
|
||||
createrandom ${TESTDIR}/other-subdir/randomfile
|
||||
createrandom ${TESTDIR}/other-subdir/other-randomfile
|
||||
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json alpine)
|
||||
root=$(buildah mount $cid)
|
||||
buildah config --workingdir / $cid
|
||||
buildah copy --chown 1:1 $cid ${TESTDIR}/randomfile
|
||||
buildah copy --chown root:1 $cid ${TESTDIR}/randomfile /randomfile2
|
||||
buildah copy --chown nobody $cid ${TESTDIR}/randomfile /randomfile3
|
||||
buildah copy --chown nobody:root $cid ${TESTDIR}/subdir /subdir
|
||||
test $(stat -c "%u:%g" $root/randomfile) = "1:1"
|
||||
test $(stat -c "%U:%g" $root/randomfile2) = "root:1"
|
||||
test $(stat -c "%U" $root/randomfile3) = "nobody"
|
||||
(cd $root/subdir/; for i in *; do test $(stat -c "%U:%G" $i) = "nobody:root"; done)
|
||||
buildah copy --chown root:root $cid ${TESTDIR}/other-subdir /subdir
|
||||
(cd $root/subdir/; for i in *randomfile; do test $(stat -c "%U:%G" $i) = "root:root"; done)
|
||||
test $(stat -c "%U:%G" $root/subdir) = "nobody:root"
|
||||
buildah rm $cid
|
||||
}
|
||||
|
||||
44
tests/digest.bats
Normal file
@@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
fromreftest() {
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json $1)
|
||||
pushdir=${TESTDIR}/fromreftest
|
||||
mkdir -p ${pushdir}/{1,2,3}
|
||||
buildah push --signature-policy ${TESTSDIR}/policy.json $1 dir:${pushdir}/1
|
||||
buildah commit --signature-policy ${TESTSDIR}/policy.json $cid new-image
|
||||
buildah push --signature-policy ${TESTSDIR}/policy.json new-image dir:${pushdir}/2
|
||||
buildah rmi new-image
|
||||
buildah commit --signature-policy ${TESTSDIR}/policy.json $cid dir:${pushdir}/3
|
||||
buildah rm $cid
|
||||
rm -fr ${pushdir}
|
||||
}
|
||||
|
||||
@test "from-by-digest-s1" {
|
||||
fromreftest kubernetes/pause@sha256:f8cd50c5a287dd8c5f226cf69c60c737d34ed43726c14b8a746d9de2d23eda2b
|
||||
}
|
||||
|
||||
@test "from-by-digest-s1-a-discarded-layer" {
|
||||
fromreftest docker/whalesay@sha256:178598e51a26abbc958b8a2e48825c90bc22e641de3d31e18aaf55f3258ba93b
|
||||
}
|
||||
|
||||
@test "from-by-tag-s1" {
|
||||
fromreftest kubernetes/pause:go
|
||||
}
|
||||
|
||||
@test "from-by-repo-only-s1" {
|
||||
fromreftest kubernetes/pause
|
||||
}
|
||||
|
||||
@test "from-by-digest-s2" {
|
||||
fromreftest alpine@sha256:e9cec9aec697d8b9d450edd32860ecd363f2f3174c8338beb5f809422d182c63
|
||||
}
|
||||
|
||||
@test "from-by-tag-s2" {
|
||||
fromreftest alpine:2.6
|
||||
}
|
||||
|
||||
@test "from-by-repo-only-s2" {
|
||||
fromreftest alpine
|
||||
}
|
||||
@@ -3,7 +3,6 @@
|
||||
load helpers
|
||||
|
||||
@test "write-formats" {
|
||||
buildimgtype
|
||||
cid=$(buildah from --pull=false --signature-policy ${TESTSDIR}/policy.json scratch)
|
||||
buildah commit --signature-policy ${TESTSDIR}/policy.json $cid scratch-image-default
|
||||
buildah commit --format dockerv2 --signature-policy ${TESTSDIR}/policy.json $cid scratch-image-docker
|
||||
@@ -20,7 +19,6 @@ load helpers
|
||||
}
|
||||
|
||||
@test "bud-formats" {
|
||||
buildimgtype
|
||||
buildah build-using-dockerfile --signature-policy ${TESTSDIR}/policy.json -t scratch-image-default -f bud/from-scratch/Dockerfile
|
||||
buildah build-using-dockerfile --format dockerv2 --signature-policy ${TESTSDIR}/policy.json -t scratch-image-docker -f bud/from-scratch/Dockerfile
|
||||
buildah build-using-dockerfile --format ociv1 --signature-policy ${TESTSDIR}/policy.json -t scratch-image-oci -f bud/from-scratch/Dockerfile
|
||||
|
||||
130
tests/from.bats
Normal file
@@ -0,0 +1,130 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
@test "commit-to-from-elsewhere" {
|
||||
elsewhere=${TESTDIR}/elsewhere-img
|
||||
mkdir -p ${elsewhere}
|
||||
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json scratch)
|
||||
buildah commit --signature-policy ${TESTSDIR}/policy.json $cid dir:${elsewhere}
|
||||
buildah rm $cid
|
||||
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json dir:${elsewhere})
|
||||
buildah rm $cid
|
||||
[ "$cid" = elsewhere-img-working-container ]
|
||||
|
||||
cid=$(buildah from --pull-always --signature-policy ${TESTSDIR}/policy.json dir:${elsewhere})
|
||||
buildah rm $cid
|
||||
[ "$cid" = `basename ${elsewhere}`-working-container ]
|
||||
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json scratch)
|
||||
buildah commit --signature-policy ${TESTSDIR}/policy.json $cid dir:${elsewhere}
|
||||
buildah rm $cid
|
||||
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json dir:${elsewhere})
|
||||
buildah rm $cid
|
||||
[ "$cid" = elsewhere-img-working-container ]
|
||||
|
||||
cid=$(buildah from --pull-always --signature-policy ${TESTSDIR}/policy.json dir:${elsewhere})
|
||||
buildah rm $cid
|
||||
[ "$cid" = `basename ${elsewhere}`-working-container ]
|
||||
}
|
||||
|
||||
@test "from-authenticate-cert" {
|
||||
|
||||
mkdir -p ${TESTDIR}/auth
|
||||
# Create certifcate via openssl
|
||||
openssl req -newkey rsa:4096 -nodes -sha256 -keyout ${TESTDIR}/auth/domain.key -x509 -days 2 -out ${TESTDIR}/auth/domain.crt -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
|
||||
# Skopeo and buildah both require *.cert file
|
||||
cp ${TESTDIR}/auth/domain.crt ${TESTDIR}/auth/domain.cert
|
||||
|
||||
# Create a private registry that uses certificate and creds file
|
||||
# docker run -d -p 5000:5000 --name registry -v ${TESTDIR}/auth:${TESTDIR}/auth:Z -e REGISTRY_HTTP_TLS_CERTIFICATE=${TESTDIR}/auth/domain.crt -e REGISTRY_HTTP_TLS_KEY=${TESTDIR}/auth/domain.key registry:2
|
||||
|
||||
# When more buildah auth is in place convert the below.
|
||||
# docker pull alpine
|
||||
# docker tag alpine localhost:5000/my-alpine
|
||||
# docker push localhost:5000/my-alpine
|
||||
|
||||
# ctrid=$(buildah from localhost:5000/my-alpine --cert-dir ${TESTDIR}/auth)
|
||||
# buildah rm $ctrid
|
||||
# buildah rmi -f $(buildah --debug=false images -q)
|
||||
|
||||
# This should work
|
||||
# ctrid=$(buildah from localhost:5000/my-alpine --cert-dir ${TESTDIR}/auth --tls-verify true)
|
||||
|
||||
rm -rf ${TESTDIR}/auth
|
||||
|
||||
# This should fail
|
||||
run ctrid=$(buildah from localhost:5000/my-alpine --cert-dir ${TESTDIR}/auth --tls-verify true)
|
||||
[ "$status" -ne 0 ]
|
||||
|
||||
# Clean up
|
||||
# docker rm -f $(docker ps --all -q)
|
||||
# docker rmi -f localhost:5000/my-alpine
|
||||
# docker rmi -f $(docker images -q)
|
||||
# buildah rm $ctrid
|
||||
# buildah rmi -f $(buildah --debug=false images -q)
|
||||
}
|
||||
|
||||
@test "from-authenticate-cert-and-creds" {
|
||||
mkdir -p ${TESTDIR}/auth
|
||||
# Create creds and store in ${TESTDIR}/auth/htpasswd
|
||||
# docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > ${TESTDIR}/auth/htpasswd
|
||||
# Create certifcate via openssl
|
||||
openssl req -newkey rsa:4096 -nodes -sha256 -keyout ${TESTDIR}/auth/domain.key -x509 -days 2 -out ${TESTDIR}/auth/domain.crt -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
|
||||
# Skopeo and buildah both require *.cert file
|
||||
cp ${TESTDIR}/auth/domain.crt ${TESTDIR}/auth/domain.cert
|
||||
|
||||
# Create a private registry that uses certificate and creds file
|
||||
# docker run -d -p 5000:5000 --name registry -v ${TESTDIR}/auth:${TESTDIR}/auth:Z -e "REGISTRY_AUTH=htpasswd" -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" -e REGISTRY_AUTH_HTPASSWD_PATH=${TESTDIR}/auth/htpasswd -e REGISTRY_HTTP_TLS_CERTIFICATE=${TESTDIR}/auth/domain.crt -e REGISTRY_HTTP_TLS_KEY=${TESTDIR}/auth/domain.key registry:2
|
||||
|
||||
# When more buildah auth is in place convert the below.
|
||||
# docker pull alpine
|
||||
# docker login localhost:5000 --username testuser --password testpassword
|
||||
# docker tag alpine localhost:5000/my-alpine
|
||||
# docker push localhost:5000/my-alpine
|
||||
|
||||
# ctrid=$(buildah from localhost:5000/my-alpine --cert-dir ${TESTDIR}/auth)
|
||||
# buildah rm $ctrid
|
||||
# buildah rmi -f $(buildah --debug=false images -q)
|
||||
|
||||
# docker logout localhost:5000
|
||||
|
||||
# This should fail
|
||||
run ctrid=$(buildah from localhost:5000/my-alpine --cert-dir ${TESTDIR}/auth --tls-verify true)
|
||||
[ "$status" -ne 0 ]
|
||||
|
||||
# This should work
|
||||
# ctrid=$(buildah from localhost:5000/my-alpine --cert-dir ${TESTDIR}/auth --tls-verify true --creds=testuser:testpassword)
|
||||
|
||||
# Clean up
|
||||
rm -rf ${TESTDIR}/auth
|
||||
# docker rm -f $(docker ps --all -q)
|
||||
# docker rmi -f localhost:5000/my-alpine
|
||||
# docker rmi -f $(docker images -q)
|
||||
# buildah rm $ctrid
|
||||
# buildah rmi -f $(buildah --debug=false images -q)
|
||||
}
|
||||
|
||||
@test "from-tagged-image" {
|
||||
# Github #396: Make sure the container name starts with the correct image even when it's tagged.
|
||||
cid=$(buildah from --pull=false --signature-policy ${TESTSDIR}/policy.json scratch)
|
||||
buildah commit --signature-policy ${TESTSDIR}/policy.json "$cid" scratch2
|
||||
buildah rm $cid
|
||||
buildah tag scratch2 scratch3
|
||||
cid=$(buildah from --signature-policy ${TESTSDIR}/policy.json scratch3)
|
||||
[ "$cid" == scratch3-working-container ]
|
||||
buildah rm ${cid}
|
||||
buildah rmi scratch2 scratch3
|
||||
|
||||
# Github https://github.com/projectatomic/buildah/issues/396#issuecomment-360949396
|
||||
cid=$(buildah from --pull=true --signature-policy ${TESTSDIR}/policy.json alpine)
|
||||
buildah rm $cid
|
||||
buildah tag alpine alpine2
|
||||
cid=$(buildah from --signature-policy ${TESTSDIR}/policy.json docker.io/alpine2)
|
||||
[ "$cid" == alpine2-working-container ]
|
||||
buildah rm ${cid}
|
||||
buildah rmi alpine alpine2
|
||||
}
|
||||
@@ -1,18 +1,16 @@
|
||||
#!/bin/bash
|
||||
|
||||
BUILDAH_BINARY=${BUILDAH_BINARY:-$(dirname ${BASH_SOURCE})/../buildah}
|
||||
IMGTYPE_BINARY=${IMGTYPE_BINARY:-$(dirname ${BASH_SOURCE})/../imgtype}
|
||||
TESTSDIR=${TESTSDIR:-$(dirname ${BASH_SOURCE})}
|
||||
STORAGE_DRIVER=${STORAGE_DRIVER:-vfs}
|
||||
PATH=$(dirname ${BASH_SOURCE})/..:${PATH}
|
||||
|
||||
function setup() {
|
||||
suffix=$(dd if=/dev/urandom bs=12 count=1 status=none | base64 | tr +/ _.)
|
||||
suffix=$(dd if=/dev/urandom bs=12 count=1 status=none | base64 | tr +/ABCDEFGHIJKLMNOPQRSTUVWXYZ _.abcdefghijklmnopqrstuvwxyz)
|
||||
TESTDIR=${BATS_TMPDIR}/tmp.${suffix}
|
||||
rm -fr ${TESTDIR}
|
||||
mkdir -p ${TESTDIR}/{root,runroot}
|
||||
REPO=${TESTDIR}/root
|
||||
}
|
||||
|
||||
function buildimgtype() {
|
||||
go build -tags "$(${TESTSDIR}/../btrfs_tag.sh; ${TESTSDIR}/../libdm_tag.sh)" -o imgtype ${TESTSDIR}/imgtype.go
|
||||
}
|
||||
|
||||
function starthttpd() {
|
||||
@@ -44,9 +42,9 @@ function createrandom() {
|
||||
}
|
||||
|
||||
function buildah() {
|
||||
${BUILDAH_BINARY} --debug --root ${TESTDIR}/root --runroot ${TESTDIR}/runroot --storage-driver vfs "$@"
|
||||
${BUILDAH_BINARY} --debug --registries-conf ${TESTSDIR}/registries.conf --root ${TESTDIR}/root --runroot ${TESTDIR}/runroot --storage-driver ${STORAGE_DRIVER} "$@"
|
||||
}
|
||||
|
||||
function imgtype() {
|
||||
./imgtype -root ${TESTDIR}/root -runroot ${TESTDIR}/runroot -storage-driver vfs "$@"
|
||||
${IMGTYPE_BINARY} -root ${TESTDIR}/root -runroot ${TESTDIR}/runroot -storage-driver ${STORAGE_DRIVER} "$@"
|
||||
}
|
||||
|
||||
149
tests/imgtype.go
@@ -4,22 +4,29 @@ import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/projectatomic/buildah/docker"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if buildah.InitReexec() {
|
||||
return
|
||||
}
|
||||
|
||||
expectedManifestType := ""
|
||||
expectedConfigType := ""
|
||||
|
||||
storeOptions := storage.DefaultStoreOptions
|
||||
debug := flag.Bool("debug", false, "turn on debug logging")
|
||||
root := flag.String("root", storeOptions.GraphRoot, "storage root directory")
|
||||
runroot := flag.String("runroot", storeOptions.RunRoot, "storage runtime directory")
|
||||
driver := flag.String("storage-driver", storeOptions.GraphDriverName, "storage driver")
|
||||
@@ -27,8 +34,14 @@ func main() {
|
||||
policy := flag.String("signature-policy", "", "signature policy file")
|
||||
mtype := flag.String("expected-manifest-type", buildah.OCIv1ImageManifest, "expected manifest type")
|
||||
showm := flag.Bool("show-manifest", false, "output the manifest JSON")
|
||||
rebuildm := flag.Bool("rebuild-manifest", false, "rebuild the manifest JSON")
|
||||
showc := flag.Bool("show-config", false, "output the configuration JSON")
|
||||
rebuildc := flag.Bool("rebuild-config", false, "rebuild the configuration JSON")
|
||||
flag.Parse()
|
||||
logrus.SetLevel(logrus.ErrorLevel)
|
||||
if debug != nil && *debug {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
switch *mtype {
|
||||
case buildah.OCIv1ImageManifest:
|
||||
expectedManifestType = *mtype
|
||||
@@ -40,8 +53,9 @@ func main() {
|
||||
expectedManifestType = ""
|
||||
expectedConfigType = ""
|
||||
default:
|
||||
logrus.Fatalf("unknown -expected-manifest-type value, expected either %q or %q or %q",
|
||||
logrus.Errorf("unknown -expected-manifest-type value, expected either %q or %q or %q",
|
||||
buildah.OCIv1ImageManifest, buildah.Dockerv2ImageManifest, "*")
|
||||
return
|
||||
}
|
||||
if root != nil {
|
||||
storeOptions.GraphRoot = *root
|
||||
@@ -65,11 +79,20 @@ func main() {
|
||||
}
|
||||
store, err := storage.GetStore(storeOptions)
|
||||
if err != nil {
|
||||
logrus.Fatalf("error opening storage: %v", err)
|
||||
logrus.Errorf("error opening storage: %v", err)
|
||||
return
|
||||
}
|
||||
defer store.Shutdown(false)
|
||||
is.Transport.SetStore(store)
|
||||
|
||||
errors := false
|
||||
defer func() {
|
||||
store.Shutdown(false)
|
||||
if errors {
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
for _, image := range args {
|
||||
var ref types.ImageReference
|
||||
oImage := v1.Image{}
|
||||
dImage := docker.V2Image{}
|
||||
oManifest := v1.Manifest{}
|
||||
@@ -79,60 +102,138 @@ func main() {
|
||||
|
||||
ref, err := is.Transport.ParseStoreReference(store, image)
|
||||
if err != nil {
|
||||
logrus.Fatalf("error parsing reference %q: %v", image, err)
|
||||
}
|
||||
|
||||
src, err := ref.NewImageSource(systemContext, []string{expectedManifestType})
|
||||
if err != nil {
|
||||
logrus.Fatalf("error opening source image %q: %v", image, err)
|
||||
}
|
||||
defer src.Close()
|
||||
|
||||
manifest, manifestType, err := src.GetManifest()
|
||||
if err != nil {
|
||||
logrus.Fatalf("error reading manifest from %q: %v", image, err)
|
||||
ref2, err2 := alltransports.ParseImageName(image)
|
||||
if err2 != nil {
|
||||
logrus.Errorf("error parsing reference %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
ref = ref2
|
||||
}
|
||||
|
||||
img, err := ref.NewImage(systemContext)
|
||||
if err != nil {
|
||||
logrus.Fatalf("error opening image %q: %v", image, err)
|
||||
logrus.Errorf("error opening image %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
defer img.Close()
|
||||
|
||||
config, err := img.ConfigBlob()
|
||||
if err != nil {
|
||||
logrus.Fatalf("error reading configuration from %q: %v", image, err)
|
||||
logrus.Errorf("error reading configuration from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
|
||||
manifest, manifestType, err := img.Manifest()
|
||||
if err != nil {
|
||||
logrus.Errorf("error reading manifest from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
|
||||
switch expectedManifestType {
|
||||
case buildah.OCIv1ImageManifest:
|
||||
err = json.Unmarshal(manifest, &oManifest)
|
||||
if err != nil {
|
||||
logrus.Fatalf("error parsing manifest from %q: %v", image, err)
|
||||
logrus.Errorf("error parsing manifest from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
err = json.Unmarshal(config, &oImage)
|
||||
if err != nil {
|
||||
logrus.Fatalf("error parsing config from %q: %v", image, err)
|
||||
logrus.Errorf("error parsing config from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
manifestType = v1.MediaTypeImageManifest
|
||||
configType = oManifest.Config.MediaType
|
||||
case buildah.Dockerv2ImageManifest:
|
||||
err = json.Unmarshal(manifest, &dManifest)
|
||||
if err != nil {
|
||||
logrus.Fatalf("error parsing manifest from %q: %v", image, err)
|
||||
logrus.Errorf("error parsing manifest from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
err = json.Unmarshal(config, &dImage)
|
||||
if err != nil {
|
||||
logrus.Fatalf("error parsing config from %q: %v", image, err)
|
||||
logrus.Errorf("error parsing config from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
manifestType = dManifest.MediaType
|
||||
configType = dManifest.Config.MediaType
|
||||
}
|
||||
if expectedManifestType != "" && manifestType != expectedManifestType {
|
||||
logrus.Fatalf("expected manifest type %q in %q, got %q", expectedManifestType, image, manifestType)
|
||||
logrus.Errorf("expected manifest type %q in %q, got %q", expectedManifestType, image, manifestType)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
switch manifestType {
|
||||
case buildah.OCIv1ImageManifest:
|
||||
if rebuildm != nil && *rebuildm {
|
||||
err = json.Unmarshal(manifest, &oManifest)
|
||||
if err != nil {
|
||||
logrus.Errorf("error parsing manifest from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
manifest, err = json.Marshal(oManifest)
|
||||
if err != nil {
|
||||
logrus.Errorf("error rebuilding manifest from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
if rebuildc != nil && *rebuildc {
|
||||
err = json.Unmarshal(config, &oImage)
|
||||
if err != nil {
|
||||
logrus.Errorf("error parsing config from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
config, err = json.Marshal(oImage)
|
||||
if err != nil {
|
||||
logrus.Errorf("error rebuilding config from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
case buildah.Dockerv2ImageManifest:
|
||||
if rebuildm != nil && *rebuildm {
|
||||
err = json.Unmarshal(manifest, &dManifest)
|
||||
if err != nil {
|
||||
logrus.Errorf("error parsing manifest from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
manifest, err = json.Marshal(dManifest)
|
||||
if err != nil {
|
||||
logrus.Errorf("error rebuilding manifest from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
if rebuildc != nil && *rebuildc {
|
||||
err = json.Unmarshal(config, &dImage)
|
||||
if err != nil {
|
||||
logrus.Errorf("error parsing config from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
config, err = json.Marshal(dImage)
|
||||
if err != nil {
|
||||
logrus.Errorf("error rebuilding config from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
if expectedConfigType != "" && configType != expectedConfigType {
|
||||
logrus.Fatalf("expected config type %q in %q, got %q", expectedConfigType, image, configType)
|
||||
logrus.Errorf("expected config type %q in %q, got %q", expectedConfigType, image, configType)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
if showm != nil && *showm {
|
||||
fmt.Println(string(manifest))
|
||||
|
||||