Compare commits
97 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e073df11aa | ||
|
|
8badcc2d02 | ||
|
|
a586779353 | ||
|
|
4eb654f10c | ||
|
|
f29314579d | ||
|
|
9e20c3d948 | ||
|
|
46c1a54b15 | ||
|
|
67b565da7b | ||
|
|
dd4a6aea97 | ||
|
|
9116598a2e | ||
|
|
df2a10d43f | ||
|
|
e9915937ac | ||
|
|
5a9c591abf | ||
|
|
531ef9159d | ||
|
|
811cf927d7 | ||
|
|
032b56ee8d | ||
|
|
6af847dd2a | ||
|
|
6b207f7b0c | ||
|
|
c14697ebe4 | ||
|
|
c35493248e | ||
|
|
d03a894969 | ||
|
|
fbb8b702bc | ||
|
|
815cedfc71 | ||
|
|
1c97f6ac2c | ||
|
|
bc9d574c10 | ||
|
|
c84db980ae | ||
|
|
85a37b39e8 | ||
|
|
49095a83f8 | ||
|
|
6c05a352df | ||
|
|
1849466827 | ||
|
|
4f38267342 | ||
|
|
9790b89771 | ||
|
|
61f5319504 | ||
|
|
947714fbd2 | ||
|
|
c615c3e23d | ||
|
|
d0e1ad1a1a | ||
|
|
b68f88c53d | ||
|
|
7dc787a9c7 | ||
|
|
2dbb2a13ed | ||
|
|
ad49b24d0b | ||
|
|
ba128004ca | ||
|
|
5179733c63 | ||
|
|
40c3a57d5a | ||
|
|
de9e71dda7 | ||
|
|
1052f3ba40 | ||
|
|
6bad262ff1 | ||
|
|
092591620b | ||
|
|
4d6c90e902 | ||
|
|
17d9a73329 | ||
|
|
fe2de4f491 | ||
|
|
adfb256a0f | ||
|
|
029bdbcbd0 | ||
|
|
fd995e6166 | ||
|
|
ae7d2f3547 | ||
|
|
86fa0803e8 | ||
|
|
81dfe0a964 | ||
|
|
9bff989832 | ||
|
|
b8740e386e | ||
|
|
9f5e1b3a77 | ||
|
|
01f8c7afee | ||
|
|
67e5341846 | ||
|
|
123493895f | ||
|
|
129fb109d5 | ||
|
|
979c945674 | ||
|
|
c77a8d39f1 | ||
|
|
f4151372e5 | ||
|
|
0705787a07 | ||
|
|
a5129ec3eb | ||
|
|
47ac96155f | ||
|
|
8b2b56d9b8 | ||
|
|
544e63de42 | ||
|
|
43a025ebf9 | ||
|
|
6116d6a9bc | ||
|
|
e5aa6c9fc5 | ||
|
|
95ca6c1e1f | ||
|
|
7244ef44fb | ||
|
|
9df6f62a4c | ||
|
|
bf01a80b2b | ||
|
|
ccd3b3fedb | ||
|
|
4b4e25868c | ||
|
|
4d943752fe | ||
|
|
9128a40ada | ||
|
|
8910199181 | ||
|
|
1fc5a49958 | ||
|
|
98f1533731 | ||
|
|
aae843123f | ||
|
|
77804bf256 | ||
|
|
7aaa21d70a | ||
|
|
ee9b8cde5a | ||
|
|
04ea079130 | ||
|
|
2dd03d6741 | ||
|
|
1680a5f0a0 | ||
|
|
5dd1a5f3c9 | ||
|
|
15792b227a | ||
|
|
38d3cddb0c | ||
|
|
a99d5f0798 | ||
|
|
53c3e6434d |
28
.copr/Makefile
Normal file
@@ -0,0 +1,28 @@
|
||||
#!/usr/bin/make -f
|
||||
|
||||
spec := contrib/rpm/buildah_copr.spec
|
||||
outdir := $(CURDIR)
|
||||
tmpdir := build
|
||||
gitdir := $(PWD)/.git
|
||||
|
||||
rev := $(shell sed 's/\(.......\).*/\1/' $(gitdir)/$$(sed -n '/^ref:/{s/.* //;p}' $(gitdir)/HEAD))
|
||||
date := $(shell date +%Y%m%d.%H%M)
|
||||
|
||||
version := $(shell sed -n '/Version:/{s/.* //;p}' $(spec))
|
||||
release := $(date).git.$(rev)
|
||||
|
||||
srpm: $(outdir)/buildah-$(version)-$(release).src.rpm
|
||||
|
||||
$(tmpdir)/buildah.spec: $(spec)
|
||||
@mkdir -p $(tmpdir)
|
||||
sed '/^Release:/s/\(: *\).*/\1$(release)%{?dist}/' $< >$@
|
||||
|
||||
$(tmpdir)/$(version).tar.gz: $(gitdir)/..
|
||||
@mkdir -p $(tmpdir)
|
||||
tar c --exclude-vcs --exclude-vcs-ignores -C $< --transform 's|^\.|buildah-$(version)|' . | gzip -9 >$@
|
||||
|
||||
$(outdir)/buildah-$(version)-$(release).src.rpm: $(tmpdir)/buildah.spec $(tmpdir)/$(version).tar.gz
|
||||
@mkdir -p $(outdir)
|
||||
rpmbuild -D'_srcrpmdir $(outdir)' -D'_sourcedir $(tmpdir)' -bs $(tmpdir)/buildah.spec
|
||||
|
||||
.PHONY: srpm
|
||||
65
.github/ISSUE_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
<!--
|
||||
If you are reporting a new issue, make sure that we do not have any duplicates
|
||||
already open. You can ensure this by searching the issue list for this
|
||||
repository. If there is a duplicate, please close your issue and add a comment
|
||||
to the existing issue instead.
|
||||
|
||||
If you suspect your issue is a bug, please edit your issue description to
|
||||
include the BUG REPORT INFORMATION shown below. If you fail to provide this
|
||||
information within 7 days, we cannot debug your issue and will close it. We
|
||||
will, however, reopen it if you later provide the information.
|
||||
|
||||
---------------------------------------------------
|
||||
BUG REPORT INFORMATION
|
||||
---------------------------------------------------
|
||||
Use the commands below to provide key information from your environment:
|
||||
You do NOT have to include this information if this is a FEATURE REQUEST
|
||||
-->
|
||||
|
||||
**Description**
|
||||
|
||||
<!--
|
||||
Briefly describe the problem you are having in a few paragraphs.
|
||||
-->
|
||||
|
||||
**Steps to reproduce the issue:**
|
||||
1.
|
||||
2.
|
||||
3.
|
||||
|
||||
|
||||
**Describe the results you received:**
|
||||
|
||||
|
||||
**Describe the results you expected:**
|
||||
|
||||
|
||||
**Output of `rpm -q buildah` or `apt list buildah`:**
|
||||
|
||||
```
|
||||
(paste your output here)
|
||||
```
|
||||
|
||||
**Output of `buildah version`:**
|
||||
|
||||
```
|
||||
(paste your output here)
|
||||
```
|
||||
|
||||
**Output of `cat /etc/*release`:**
|
||||
|
||||
```
|
||||
(paste your output here)
|
||||
```
|
||||
|
||||
**Output of `uname -a`:**
|
||||
|
||||
```
|
||||
(paste your output here)
|
||||
```
|
||||
|
||||
**Output of `cat /etc/containers/storage.conf`:**
|
||||
|
||||
```
|
||||
(paste your output here)
|
||||
```
|
||||
1
.gitignore
vendored
@@ -1,3 +1,4 @@
|
||||
docs/buildah*.1
|
||||
/buildah
|
||||
/imgtype
|
||||
/build/
|
||||
|
||||
1
.papr.sh
@@ -23,6 +23,7 @@ dnf install -y \
|
||||
libselinux-devel \
|
||||
libselinux-utils \
|
||||
make \
|
||||
openssl \
|
||||
ostree-devel \
|
||||
which
|
||||
|
||||
|
||||
34
.papr.yml
@@ -9,6 +9,40 @@ host:
|
||||
required: true
|
||||
|
||||
tests:
|
||||
# Let's create a self signed certificate and get it in the right places
|
||||
- hostname
|
||||
- ip a
|
||||
- ping -c 3 localhost
|
||||
- cat /etc/hostname
|
||||
- mkdir -p /home/travis/auth
|
||||
- openssl req -newkey rsa:4096 -nodes -sha256 -keyout /home/travis/auth/domain.key -x509 -days 2 -out /home/travis/auth/domain.crt -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
|
||||
- cp /home/travis/auth/domain.crt /home/travis/auth/domain.cert
|
||||
- sudo mkdir -p /etc/docker/certs.d/docker.io/
|
||||
- sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/docker.io/ca.crt
|
||||
- sudo mkdir -p /etc/docker/certs.d/localhost:5000/
|
||||
- sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/localhost:5000/ca.crt
|
||||
- sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/localhost:5000/domain.crt
|
||||
# Create the credentials file, then start up the Docker registry
|
||||
- docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > /home/travis/auth/htpasswd
|
||||
- docker run -d -p 5000:5000 --name registry -v /home/travis/auth:/home/travis/auth:Z -e "REGISTRY_AUTH=htpasswd" -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" -e REGISTRY_AUTH_HTPASSWD_PATH=/home/travis/auth/htpasswd -e REGISTRY_HTTP_TLS_CERTIFICATE=/home/travis/auth/domain.crt -e REGISTRY_HTTP_TLS_KEY=/home/travis/auth/domain.key registry:2
|
||||
|
||||
# Test Docker setup
|
||||
- docker ps --all
|
||||
- docker images
|
||||
- ls -alF /home/travis/auth
|
||||
- docker pull alpine
|
||||
- docker login localhost:5000 --username testuser --password testpassword
|
||||
- docker tag alpine localhost:5000/my-alpine
|
||||
- docker push localhost:5000/my-alpine
|
||||
- docker ps --all
|
||||
- docker images
|
||||
- docker rmi docker.io/alpine
|
||||
- docker rmi localhost:5000/my-alpine
|
||||
- docker pull localhost:5000/my-alpine
|
||||
- docker ps --all
|
||||
- docker images
|
||||
- docker rmi localhost:5000/my-alpine
|
||||
|
||||
# mount yum repos to inherit injected mirrors from PAPR
|
||||
- docker run --net=host --privileged -v /etc/yum.repos.d:/etc/yum.repos.d.host:ro
|
||||
-v $PWD:/code registry.fedoraproject.org/fedora:26 sh -c
|
||||
|
||||
39
.travis.yml
@@ -2,7 +2,6 @@ language: go
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go:
|
||||
- 1.7
|
||||
- 1.8
|
||||
- 1.9.x
|
||||
- tip
|
||||
@@ -20,10 +19,46 @@ services:
|
||||
- docker
|
||||
before_install:
|
||||
- sudo add-apt-repository -y ppa:duggan/bats
|
||||
- sudo apt-get -qq update
|
||||
- sudo apt-get update
|
||||
- sudo apt-get -qq install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libselinux1-dev
|
||||
- sudo apt-get -qq remove libseccomp2
|
||||
- sudo apt-get -qq update
|
||||
- sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
|
||||
- mkdir /home/travis/auth
|
||||
install:
|
||||
# Let's create a self signed certificate and get it in the right places
|
||||
- hostname
|
||||
- ip a
|
||||
- ping -c 3 localhost
|
||||
- cat /etc/hostname
|
||||
- openssl req -newkey rsa:4096 -nodes -sha256 -keyout /home/travis/auth/domain.key -x509 -days 2 -out /home/travis/auth/domain.crt -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
|
||||
- cp /home/travis/auth/domain.crt /home/travis/auth/domain.cert
|
||||
- sudo mkdir -p /etc/docker/certs.d/docker.io/
|
||||
- sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/docker.io/ca.crt
|
||||
- sudo mkdir -p /etc/docker/certs.d/localhost:5000/
|
||||
- sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/localhost:5000/ca.crt
|
||||
- sudo cp /home/travis/auth/domain.crt /etc/docker/certs.d/localhost:5000/domain.crt
|
||||
# Create the credentials file, then start up the Docker registry
|
||||
- docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > /home/travis/auth/htpasswd
|
||||
- docker run -d -p 5000:5000 --name registry -v /home/travis/auth:/home/travis/auth:Z -e "REGISTRY_AUTH=htpasswd" -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" -e REGISTRY_AUTH_HTPASSWD_PATH=/home/travis/auth/htpasswd -e REGISTRY_HTTP_TLS_CERTIFICATE=/home/travis/auth/domain.crt -e REGISTRY_HTTP_TLS_KEY=/home/travis/auth/domain.key registry:2
|
||||
script:
|
||||
# Let's do some docker stuff just for verification purposes
|
||||
- docker ps --all
|
||||
- docker images
|
||||
- ls -alF /home/travis/auth
|
||||
- docker pull alpine
|
||||
- docker login localhost:5000 --username testuser --password testpassword
|
||||
- docker tag alpine localhost:5000/my-alpine
|
||||
- docker push localhost:5000/my-alpine
|
||||
- docker ps --all
|
||||
- docker images
|
||||
- docker rmi docker.io/alpine
|
||||
- docker rmi localhost:5000/my-alpine
|
||||
- docker pull localhost:5000/my-alpine
|
||||
- docker ps --all
|
||||
- docker images
|
||||
- docker rmi localhost:5000/my-alpine
|
||||
# Setting up Docker Registry is complete, let's do Buildah testing!
|
||||
- make install.tools install.libseccomp.sudo all runc validate TAGS="apparmor seccomp containers_image_ostree_stub"
|
||||
- go test -c -tags "apparmor seccomp `./btrfs_tag.sh` `./libdm_tag.sh` `./ostree_tag.sh` `./selinux_tag.sh`" ./cmd/buildah
|
||||
- tmp=`mktemp -d`; mkdir $tmp/root $tmp/runroot; sudo PATH="$PATH" ./buildah.test -test.v -root $tmp/root -runroot $tmp/runroot -storage-driver vfs -signature-policy `pwd`/tests/policy.json
|
||||
|
||||
6
Makefile
@@ -24,7 +24,7 @@ imgtype: *.go docker/*.go util/*.go tests/imgtype.go
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
$(RM) buildah imgtype
|
||||
$(RM) buildah imgtype build
|
||||
$(MAKE) -C docs clean
|
||||
|
||||
.PHONY: docs
|
||||
@@ -78,6 +78,10 @@ install:
|
||||
install.completions:
|
||||
install -m 644 -D contrib/completions/bash/buildah $(DESTDIR)/${BASHINSTALLDIR}/buildah
|
||||
|
||||
.PHONY: install.runc
|
||||
install.runc:
|
||||
install -m 755 ../../opencontainers/runc/runc $(DESTDIR)/$(BINDIR)/
|
||||
|
||||
.PHONY: test-integration
|
||||
test-integration:
|
||||
cd tests; ./test_runner.sh
|
||||
|
||||
130
README.md
@@ -1,4 +1,4 @@
|
||||

|
||||

|
||||
|
||||
# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool which facilitates building OCI container images
|
||||
================================================================
|
||||
@@ -19,108 +19,38 @@ The Buildah package provides a command line tool which can be used to
|
||||
|
||||
**[Changelog](CHANGELOG.md)**
|
||||
|
||||
**Installation notes**
|
||||
**[Installation notes](install.md)**
|
||||
|
||||
Prior to installing Buildah, install the following packages on your linux distro:
|
||||
* make
|
||||
* golang (Requires version 1.8.1 or higher.)
|
||||
* bats
|
||||
* btrfs-progs-devel
|
||||
* bzip2
|
||||
* device-mapper-devel
|
||||
* git
|
||||
* go-md2man
|
||||
* gpgme-devel
|
||||
* glib2-devel
|
||||
* libassuan-devel
|
||||
* ostree-devel
|
||||
* runc (Requires version 1.0 RC4 or higher.)
|
||||
* skopeo-containers
|
||||
**[Tutorials](docs/tutorials/tutorials.md)**
|
||||
|
||||
In Fedora, you can use this command:
|
||||
## Example
|
||||
|
||||
From [`./examples/lighttpd.sh`](examples/lighttpd.sh):
|
||||
|
||||
```bash
|
||||
$ cat > lighttpd.sh <<EOF
|
||||
#!/bin/bash -x
|
||||
|
||||
ctr1=`buildah from ${1:-fedora}`
|
||||
|
||||
## Get all updates and install our minimal httpd server
|
||||
buildah run $ctr1 -- dnf update -y
|
||||
buildah run $ctr1 -- dnf install -y lighttpd
|
||||
|
||||
## Include some buildtime annotations
|
||||
buildah config --annotation "com.example.build.host=$(uname -n)" $ctr1
|
||||
|
||||
## Run our server and expose the port
|
||||
buildah config $ctr1 --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf"
|
||||
buildah config $ctr1 --port 80
|
||||
|
||||
## Commit this container to an image name
|
||||
buildah commit $ctr1 ${2:-$USER/lighttpd}
|
||||
EOF
|
||||
|
||||
$ chmod +x lighttpd.sh
|
||||
$ sudo ./lighttpd.sh
|
||||
```
|
||||
dnf -y install \
|
||||
make \
|
||||
golang \
|
||||
bats \
|
||||
btrfs-progs-devel \
|
||||
device-mapper-devel \
|
||||
glib2-devel \
|
||||
gpgme-devel \
|
||||
libassuan-devel \
|
||||
ostree-devel \
|
||||
git \
|
||||
bzip2 \
|
||||
go-md2man \
|
||||
runc \
|
||||
skopeo-containers
|
||||
```
|
||||
|
||||
Then to install Buildah on Fedora follow the steps in this example:
|
||||
|
||||
|
||||
```
|
||||
mkdir ~/buildah
|
||||
cd ~/buildah
|
||||
export GOPATH=`pwd`
|
||||
git clone https://github.com/projectatomic/buildah ./src/github.com/projectatomic/buildah
|
||||
cd ./src/github.com/projectatomic/buildah
|
||||
make
|
||||
make install
|
||||
buildah --help
|
||||
```
|
||||
|
||||
In RHEL 7, ensure that you are subscribed to `rhel-7-server-rpms`,
|
||||
`rhel-7-server-extras-rpms`, and `rhel-7-server-optional-rpms`, then
|
||||
run this command:
|
||||
|
||||
```
|
||||
yum -y install \
|
||||
make \
|
||||
golang \
|
||||
bats \
|
||||
btrfs-progs-devel \
|
||||
device-mapper-devel \
|
||||
glib2-devel \
|
||||
gpgme-devel \
|
||||
libassuan-devel \
|
||||
ostree-devel \
|
||||
git \
|
||||
bzip2 \
|
||||
go-md2man \
|
||||
runc \
|
||||
skopeo-containers
|
||||
```
|
||||
|
||||
The build steps for Buildah on RHEL are the same as Fedora, above.
|
||||
|
||||
In Ubuntu zesty and xenial, you can use this command:
|
||||
|
||||
```
|
||||
apt-get -y install software-properties-common
|
||||
add-apt-repository -y ppa:alexlarsson/flatpak
|
||||
add-apt-repository -y ppa:gophers/archive
|
||||
apt-add-repository -y ppa:projectatomic/ppa
|
||||
apt-get -y -qq update
|
||||
apt-get -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libostree-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
|
||||
apt-get -y install golang-1.8
|
||||
```
|
||||
Then to install Buildah on Ubuntu follow the steps in this example:
|
||||
|
||||
```
|
||||
mkdir ~/buildah
|
||||
cd ~/buildah
|
||||
export GOPATH=`pwd`
|
||||
git clone https://github.com/projectatomic/buildah ./src/github.com/projectatomic/buildah
|
||||
cd ./src/github.com/projectatomic/buildah
|
||||
PATH=/usr/lib/go-1.8/bin:$PATH make runc all TAGS="apparmor seccomp"
|
||||
make install
|
||||
buildah --help
|
||||
```
|
||||
Buildah uses `runc` to run commands when `buildah run` is used, or when `buildah build-using-dockerfile`
|
||||
encounters a `RUN` instruction, so you'll also need to build and install a compatible version of
|
||||
[runc](https://github.com/opencontainers/runc) for Buildah to call for those cases.
|
||||
|
||||
## Commands
|
||||
| Command | Description |
|
||||
@@ -135,7 +65,7 @@ encounters a `RUN` instruction, so you'll also need to build and install a compa
|
||||
| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. |
|
||||
| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. |
|
||||
| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. |
|
||||
| [buildah-push(1)](/docs/buildah-push.md) | Copies an image from local storage. |
|
||||
| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. |
|
||||
| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. |
|
||||
| [buildah-rmi(1)](/docs/buildah-rmi.md) | Removes one or more images. |
|
||||
| [buildah-run(1)](/docs/buildah-run.md) | Run a command inside of the container. |
|
||||
|
||||
101
add.go
@@ -12,10 +12,17 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/libpod/pkg/chrootuser"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
//AddAndCopyOptions holds options for add and copy commands.
|
||||
type AddAndCopyOptions struct {
|
||||
Chown string
|
||||
}
|
||||
|
||||
// addURL copies the contents of the source URL to the destination. This is
|
||||
// its own function so that deferred closes happen after we're done pulling
|
||||
// down each item of potentially many.
|
||||
@@ -58,7 +65,7 @@ func addURL(destination, srcurl string) error {
|
||||
// Add copies the contents of the specified sources into the container's root
|
||||
// filesystem, optionally extracting contents of local files that look like
|
||||
// non-empty archives.
|
||||
func (b *Builder) Add(destination string, extract bool, source ...string) error {
|
||||
func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error {
|
||||
mountPoint, err := b.Mount(b.MountLabel)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -68,12 +75,17 @@ func (b *Builder) Add(destination string, extract bool, source ...string) error
|
||||
logrus.Errorf("error unmounting container: %v", err2)
|
||||
}
|
||||
}()
|
||||
// Find out which user (and group) the destination should belong to.
|
||||
user, err := b.user(mountPoint, options.Chown)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dest := mountPoint
|
||||
if destination != "" && filepath.IsAbs(destination) {
|
||||
dest = filepath.Join(dest, destination)
|
||||
} else {
|
||||
if err = os.MkdirAll(filepath.Join(dest, b.WorkDir()), 0755); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring directory %q exists)", filepath.Join(dest, b.WorkDir()))
|
||||
if err = ensureDir(filepath.Join(dest, b.WorkDir()), user, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
dest = filepath.Join(dest, b.WorkDir(), destination)
|
||||
}
|
||||
@@ -81,8 +93,8 @@ func (b *Builder) Add(destination string, extract bool, source ...string) error
|
||||
// with a '/', create it so that we can be sure that it's a directory,
|
||||
// and any files we're copying will be placed in the directory.
|
||||
if len(destination) > 0 && destination[len(destination)-1] == os.PathSeparator {
|
||||
if err = os.MkdirAll(dest, 0755); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring directory %q exists", dest)
|
||||
if err = ensureDir(dest, user, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Make sure the destination's parent directory is usable.
|
||||
@@ -118,6 +130,9 @@ func (b *Builder) Add(destination string, extract bool, source ...string) error
|
||||
if err := addURL(d, src); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := setOwner("", d, user); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -138,13 +153,15 @@ func (b *Builder) Add(destination string, extract bool, source ...string) error
|
||||
// the source directory into the target directory. Try
|
||||
// to create it first, so that if there's a problem,
|
||||
// we'll discover why that won't work.
|
||||
d := dest
|
||||
if err := os.MkdirAll(d, 0755); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring directory %q exists", d)
|
||||
if err = ensureDir(dest, user, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("copying %q to %q", gsrc+string(os.PathSeparator)+"*", d+string(os.PathSeparator)+"*")
|
||||
if err := copyWithTar(gsrc, d); err != nil {
|
||||
return errors.Wrapf(err, "error copying %q to %q", gsrc, d)
|
||||
logrus.Debugf("copying %q to %q", gsrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
|
||||
if err := copyWithTar(gsrc, dest); err != nil {
|
||||
return errors.Wrapf(err, "error copying %q to %q", gsrc, dest)
|
||||
}
|
||||
if err := setOwner(gsrc, dest, user); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
@@ -161,6 +178,9 @@ func (b *Builder) Add(destination string, extract bool, source ...string) error
|
||||
if err := copyFileWithTar(gsrc, d); err != nil {
|
||||
return errors.Wrapf(err, "error copying %q to %q", gsrc, d)
|
||||
}
|
||||
if err := setOwner(gsrc, d, user); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
// We're extracting an archive into the destination directory.
|
||||
@@ -172,3 +192,62 @@ func (b *Builder) Add(destination string, extract bool, source ...string) error
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// user returns the user (and group) information which the destination should belong to.
|
||||
func (b *Builder) user(mountPoint string, userspec string) (specs.User, error) {
|
||||
if userspec == "" {
|
||||
userspec = b.User()
|
||||
}
|
||||
|
||||
uid, gid, err := chrootuser.GetUser(mountPoint, userspec)
|
||||
u := specs.User{
|
||||
UID: uid,
|
||||
GID: gid,
|
||||
Username: userspec,
|
||||
}
|
||||
return u, err
|
||||
}
|
||||
|
||||
// setOwner sets the uid and gid owners of a given path.
|
||||
func setOwner(src, dest string, user specs.User) error {
|
||||
fid, err := os.Stat(dest)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading %q", dest)
|
||||
}
|
||||
if !fid.IsDir() || src == "" {
|
||||
if err := os.Lchown(dest, int(user.UID), int(user.GID)); err != nil {
|
||||
return errors.Wrapf(err, "error setting ownership of %q", dest)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
err = filepath.Walk(src, func(p string, info os.FileInfo, we error) error {
|
||||
relPath, err2 := filepath.Rel(src, p)
|
||||
if err2 != nil {
|
||||
return errors.Wrapf(err2, "error getting relative path of %q to set ownership on destination", p)
|
||||
}
|
||||
if relPath != "." {
|
||||
absPath := filepath.Join(dest, relPath)
|
||||
if err2 := os.Lchown(absPath, int(user.UID), int(user.GID)); err != nil {
|
||||
return errors.Wrapf(err2, "error setting ownership of %q", absPath)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error walking dir %q to set ownership", src)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureDir creates a directory if it doesn't exist, setting ownership and permissions as passed by user and perm.
|
||||
func ensureDir(path string, user specs.User, perm os.FileMode) error {
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(path, perm); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring directory %q exists", path)
|
||||
}
|
||||
if err := os.Chown(path, int(user.UID), int(user.GID)); err != nil {
|
||||
return errors.Wrapf(err, "error setting ownership of %q", path)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
46
buildah.go
@@ -20,7 +20,7 @@ const (
|
||||
// identify working containers.
|
||||
Package = "buildah"
|
||||
// Version for the Package
|
||||
Version = "0.8"
|
||||
Version = "0.12"
|
||||
// The value we use to identify what type of information, currently a
|
||||
// serialized Builder structure, we are using as per-container state.
|
||||
// This should only be changed when we make incompatible changes to
|
||||
@@ -95,6 +95,46 @@ type Builder struct {
|
||||
DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"`
|
||||
}
|
||||
|
||||
// BuilderInfo are used as objects to display container information
|
||||
type BuilderInfo struct {
|
||||
Type string
|
||||
FromImage string
|
||||
FromImageID string
|
||||
Config string
|
||||
Manifest string
|
||||
Container string
|
||||
ContainerID string
|
||||
MountPoint string
|
||||
ProcessLabel string
|
||||
MountLabel string
|
||||
ImageAnnotations map[string]string
|
||||
ImageCreatedBy string
|
||||
OCIv1 v1.Image
|
||||
Docker docker.V2Image
|
||||
DefaultMountsFilePath string
|
||||
}
|
||||
|
||||
// GetBuildInfo gets a pointer to a Builder object and returns a BuilderInfo object from it.
|
||||
// This is used in the inspect command to display Manifest and Config as string and not []byte.
|
||||
func GetBuildInfo(b *Builder) BuilderInfo {
|
||||
return BuilderInfo{
|
||||
Type: b.Type,
|
||||
FromImage: b.FromImage,
|
||||
FromImageID: b.FromImageID,
|
||||
Config: string(b.Config),
|
||||
Manifest: string(b.Manifest),
|
||||
Container: b.Container,
|
||||
ContainerID: b.ContainerID,
|
||||
MountPoint: b.MountPoint,
|
||||
ProcessLabel: b.ProcessLabel,
|
||||
ImageAnnotations: b.ImageAnnotations,
|
||||
ImageCreatedBy: b.ImageCreatedBy,
|
||||
OCIv1: b.OCIv1,
|
||||
Docker: b.Docker,
|
||||
DefaultMountsFilePath: b.DefaultMountsFilePath,
|
||||
}
|
||||
}
|
||||
|
||||
// BuilderOptions are used to initialize a new Builder.
|
||||
type BuilderOptions struct {
|
||||
// FromImage is the name of the image which should be used as the
|
||||
@@ -159,6 +199,10 @@ type ImportFromImageOptions struct {
|
||||
// specified, indicating that the shared, system-wide default policy
|
||||
// should be used.
|
||||
SignaturePolicyPath string
|
||||
// github.com/containers/image/types SystemContext to hold information
|
||||
// about which registries we should check for completing image names
|
||||
// that don't include a domain portion.
|
||||
SystemContext *types.SystemContext
|
||||
}
|
||||
|
||||
// NewBuilder creates a new build container.
|
||||
|
||||
@@ -2,10 +2,17 @@ package main
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
addAndCopyFlags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "chown",
|
||||
Usage: "Set the user and group ownership of the destination content",
|
||||
},
|
||||
}
|
||||
addDescription = "Adds the contents of a file, URL, or directory to a container's working\n directory. If a local file appears to be an archive, its contents are\n extracted and added instead of the archive file itself."
|
||||
copyDescription = "Copies the contents of a file, URL, or directory into a container's working\n directory"
|
||||
|
||||
@@ -13,6 +20,7 @@ var (
|
||||
Name: "add",
|
||||
Usage: "Add content to the container",
|
||||
Description: addDescription,
|
||||
Flags: addAndCopyFlags,
|
||||
Action: addCmd,
|
||||
ArgsUsage: "CONTAINER-NAME-OR-ID [[FILE | DIRECTORY | URL] ...] [DESTINATION]",
|
||||
}
|
||||
@@ -21,6 +29,7 @@ var (
|
||||
Name: "copy",
|
||||
Usage: "Copy content into the container",
|
||||
Description: copyDescription,
|
||||
Flags: addAndCopyFlags,
|
||||
Action: copyCmd,
|
||||
ArgsUsage: "CONTAINER-NAME-OR-ID [[FILE | DIRECTORY | URL] ...] [DESTINATION]",
|
||||
}
|
||||
@@ -34,7 +43,11 @@ func addAndCopyCmd(c *cli.Context, extractLocalArchives bool) error {
|
||||
name := args[0]
|
||||
args = args.Tail()
|
||||
|
||||
// If list is greater then one, the last item is the destination
|
||||
if err := validateFlags(c, addAndCopyFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If list is greater than one, the last item is the destination
|
||||
dest := ""
|
||||
size := len(args)
|
||||
if size > 1 {
|
||||
@@ -52,8 +65,11 @@ func addAndCopyCmd(c *cli.Context, extractLocalArchives bool) error {
|
||||
return errors.Wrapf(err, "error reading build container %q", name)
|
||||
}
|
||||
|
||||
err = builder.Add(dest, extractLocalArchives, args...)
|
||||
if err != nil {
|
||||
options := buildah.AddAndCopyOptions{
|
||||
Chown: c.String("chown"),
|
||||
}
|
||||
|
||||
if err := builder.Add(dest, extractLocalArchives, options, args...); err != nil {
|
||||
return errors.Wrapf(err, "error adding content to container %q", builder.Container)
|
||||
}
|
||||
|
||||
|
||||
@@ -21,6 +21,16 @@ var (
|
||||
Name: "build-arg",
|
||||
Usage: "`argument=value` to supply to the builder",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "cert-dir",
|
||||
Value: "",
|
||||
Usage: "use certificates at the specified path to access the registry",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "creds",
|
||||
Value: "",
|
||||
Usage: "use `[username[:password]]` for accessing the registry",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "file, f",
|
||||
Usage: "`pathname or URL` of a Dockerfile",
|
||||
@@ -181,20 +191,29 @@ func budCmd(c *cli.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
systemContext, err := systemContextFromOptions(c)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error building system context")
|
||||
}
|
||||
|
||||
runtimeFlags := []string{}
|
||||
for _, arg := range c.StringSlice("runtime-flag") {
|
||||
runtimeFlags = append(runtimeFlags, "--"+arg)
|
||||
}
|
||||
|
||||
options := imagebuildah.BuildOptions{
|
||||
ContextDirectory: contextDir,
|
||||
PullPolicy: pullPolicy,
|
||||
Compression: imagebuildah.Gzip,
|
||||
Quiet: c.Bool("quiet"),
|
||||
SignaturePolicyPath: c.String("signature-policy"),
|
||||
SkipTLSVerify: !c.Bool("tls-verify"),
|
||||
Args: args,
|
||||
Output: output,
|
||||
AdditionalTags: tags,
|
||||
Runtime: c.String("runtime"),
|
||||
RuntimeArgs: c.StringSlice("runtime-flag"),
|
||||
RuntimeArgs: runtimeFlags,
|
||||
OutputFormat: format,
|
||||
AuthFilePath: c.String("authfile"),
|
||||
SystemContext: systemContext,
|
||||
}
|
||||
if !c.Bool("quiet") {
|
||||
options.ReportWriter = os.Stderr
|
||||
|
||||
@@ -10,11 +10,16 @@ import (
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/projectatomic/buildah/util"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var (
|
||||
commitFlags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "authfile",
|
||||
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "cert-dir",
|
||||
Value: "",
|
||||
@@ -23,7 +28,7 @@ var (
|
||||
cli.StringFlag{
|
||||
Name: "creds",
|
||||
Value: "",
|
||||
Usage: "use `username[:password]` for accessing the registry",
|
||||
Usage: "use `[username[:password]]` for accessing the registry",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "disable-compression, D",
|
||||
@@ -143,7 +148,10 @@ func commitCmd(c *cli.Context) error {
|
||||
}
|
||||
err = builder.Commit(dest, options)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error committing container %q to %q", builder.Container, image)
|
||||
return util.GetFailureCause(
|
||||
err,
|
||||
errors.Wrapf(err, "error committing container %q to %q", builder.Container, image),
|
||||
)
|
||||
}
|
||||
|
||||
if c.Bool("rm") {
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
is "github.com/containers/image/storage"
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/urfave/cli"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
var needToShutdownStore = false
|
||||
@@ -65,9 +66,10 @@ func openBuilders(store storage.Store) (builders []*buildah.Builder, err error)
|
||||
return buildah.OpenAllBuilders(store)
|
||||
}
|
||||
|
||||
func openImage(store storage.Store, name string) (builder *buildah.Builder, err error) {
|
||||
func openImage(sc *types.SystemContext, store storage.Store, name string) (builder *buildah.Builder, err error) {
|
||||
options := buildah.ImportFromImageOptions{
|
||||
Image: name,
|
||||
Image: name,
|
||||
SystemContext: sc,
|
||||
}
|
||||
builder, err = buildah.ImportBuilderFromImage(store, options)
|
||||
if err != nil {
|
||||
@@ -82,7 +84,7 @@ func openImage(store storage.Store, name string) (builder *buildah.Builder, err
|
||||
func getDateAndDigestAndSize(image storage.Image, store storage.Store) (time.Time, string, int64, error) {
|
||||
created := time.Time{}
|
||||
is.Transport.SetStore(store)
|
||||
storeRef, err := is.Transport.ParseStoreReference(store, "@"+image.ID)
|
||||
storeRef, err := is.Transport.ParseStoreReference(store, image.ID)
|
||||
if err != nil {
|
||||
return created, "", -1, err
|
||||
}
|
||||
@@ -136,28 +138,44 @@ func systemContextFromOptions(c *cli.Context) (*types.SystemContext, error) {
|
||||
if c.IsSet("authfile") {
|
||||
ctx.AuthFilePath = c.String("authfile")
|
||||
}
|
||||
if c.GlobalIsSet("registries-conf") {
|
||||
ctx.SystemRegistriesConfPath = c.GlobalString("registries-conf")
|
||||
}
|
||||
if c.GlobalIsSet("registries-conf-dir") {
|
||||
ctx.RegistriesDirPath = c.GlobalString("registries-conf-dir")
|
||||
}
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
func parseCreds(creds string) (string, string, error) {
|
||||
func parseCreds(creds string) (string, string) {
|
||||
if creds == "" {
|
||||
return "", "", errors.Wrapf(syscall.EINVAL, "credentials can't be empty")
|
||||
return "", ""
|
||||
}
|
||||
up := strings.SplitN(creds, ":", 2)
|
||||
if len(up) == 1 {
|
||||
return up[0], "", nil
|
||||
return up[0], ""
|
||||
}
|
||||
if up[0] == "" {
|
||||
return "", "", errors.Wrapf(syscall.EINVAL, "username can't be empty")
|
||||
return "", up[1]
|
||||
}
|
||||
return up[0], up[1], nil
|
||||
return up[0], up[1]
|
||||
}
|
||||
|
||||
func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {
|
||||
username, password, err := parseCreds(creds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
username, password := parseCreds(creds)
|
||||
if username == "" {
|
||||
fmt.Print("Username: ")
|
||||
fmt.Scanln(&username)
|
||||
}
|
||||
if password == "" {
|
||||
fmt.Print("Password: ")
|
||||
termPassword, err := terminal.ReadPassword(0)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not read password from terminal")
|
||||
}
|
||||
password = string(termPassword)
|
||||
}
|
||||
|
||||
return &types.DockerAuthConfig{
|
||||
Username: username,
|
||||
Password: password,
|
||||
|
||||
@@ -3,7 +3,11 @@ package main
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/urfave/cli"
|
||||
@@ -17,12 +21,43 @@ type jsonContainer struct {
|
||||
ContainerName string `json:"containername"`
|
||||
}
|
||||
|
||||
type containerOutputParams struct {
|
||||
ContainerID string
|
||||
Builder string
|
||||
ImageID string
|
||||
ImageName string
|
||||
ContainerName string
|
||||
}
|
||||
|
||||
type containerOptions struct {
|
||||
all bool
|
||||
format string
|
||||
json bool
|
||||
noHeading bool
|
||||
noTruncate bool
|
||||
quiet bool
|
||||
}
|
||||
|
||||
type containerFilterParams struct {
|
||||
id string
|
||||
name string
|
||||
ancestor string
|
||||
}
|
||||
|
||||
var (
|
||||
containersFlags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Usage: "also list non-buildah containers",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "filter, f",
|
||||
Usage: "filter output based on conditions provided",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "pretty-print containers using a Go template",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "json",
|
||||
Usage: "output in JSON format",
|
||||
@@ -60,38 +95,35 @@ func containersCmd(c *cli.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
quiet := c.Bool("quiet")
|
||||
truncate := !c.Bool("notruncate")
|
||||
JSONContainers := []jsonContainer{}
|
||||
jsonOut := c.Bool("json")
|
||||
if c.IsSet("quiet") && c.IsSet("format") {
|
||||
return errors.Errorf("quiet and format are mutually exclusive")
|
||||
}
|
||||
|
||||
list := func(n int, containerID, imageID, image, container string, isBuilder bool) {
|
||||
if jsonOut {
|
||||
JSONContainers = append(JSONContainers, jsonContainer{ID: containerID, Builder: isBuilder, ImageID: imageID, ImageName: image, ContainerName: container})
|
||||
return
|
||||
}
|
||||
opts := containerOptions{
|
||||
all: c.Bool("all"),
|
||||
format: c.String("format"),
|
||||
json: c.Bool("json"),
|
||||
noHeading: c.Bool("noheading"),
|
||||
noTruncate: c.Bool("notruncate"),
|
||||
quiet: c.Bool("quiet"),
|
||||
}
|
||||
|
||||
if n == 0 && !c.Bool("noheading") && !quiet {
|
||||
if truncate {
|
||||
fmt.Printf("%-12s %-8s %-12s %-32s %s\n", "CONTAINER ID", "BUILDER", "IMAGE ID", "IMAGE NAME", "CONTAINER NAME")
|
||||
} else {
|
||||
fmt.Printf("%-64s %-8s %-64s %-32s %s\n", "CONTAINER ID", "BUILDER", "IMAGE ID", "IMAGE NAME", "CONTAINER NAME")
|
||||
}
|
||||
}
|
||||
if quiet {
|
||||
fmt.Printf("%s\n", containerID)
|
||||
} else {
|
||||
isBuilderValue := ""
|
||||
if isBuilder {
|
||||
isBuilderValue = " *"
|
||||
}
|
||||
if truncate {
|
||||
fmt.Printf("%-12.12s %-8s %-12.12s %-32s %s\n", containerID, isBuilderValue, imageID, image, container)
|
||||
} else {
|
||||
fmt.Printf("%-64s %-8s %-64s %-32s %s\n", containerID, isBuilderValue, imageID, image, container)
|
||||
}
|
||||
var params *containerFilterParams
|
||||
if c.IsSet("filter") {
|
||||
params, err = parseCtrFilter(c.String("filter"))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing filter")
|
||||
}
|
||||
}
|
||||
|
||||
if !opts.noHeading && !opts.quiet && opts.format == "" && !opts.json {
|
||||
containerOutputHeader(!opts.noTruncate)
|
||||
}
|
||||
|
||||
return outputContainers(store, opts, params)
|
||||
}
|
||||
|
||||
func outputContainers(store storage.Store, opts containerOptions, params *containerFilterParams) error {
|
||||
seenImages := make(map[string]string)
|
||||
imageNameForID := func(id string) string {
|
||||
if id == "" {
|
||||
@@ -112,12 +144,36 @@ func containersCmd(c *cli.Context) error {
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading build containers")
|
||||
}
|
||||
if !c.Bool("all") {
|
||||
for i, builder := range builders {
|
||||
var (
|
||||
containerOutput []containerOutputParams
|
||||
JSONContainers []jsonContainer
|
||||
)
|
||||
if !opts.all {
|
||||
// only output containers created by buildah
|
||||
for _, builder := range builders {
|
||||
image := imageNameForID(builder.FromImageID)
|
||||
list(i, builder.ContainerID, builder.FromImageID, image, builder.Container, true)
|
||||
if !matchesCtrFilter(builder.ContainerID, builder.Container, builder.FromImageID, image, params) {
|
||||
continue
|
||||
}
|
||||
if opts.json {
|
||||
JSONContainers = append(JSONContainers, jsonContainer{ID: builder.ContainerID,
|
||||
Builder: true,
|
||||
ImageID: builder.FromImageID,
|
||||
ImageName: image,
|
||||
ContainerName: builder.Container})
|
||||
continue
|
||||
}
|
||||
output := containerOutputParams{
|
||||
ContainerID: builder.ContainerID,
|
||||
Builder: " *",
|
||||
ImageID: builder.FromImageID,
|
||||
ImageName: image,
|
||||
ContainerName: builder.Container,
|
||||
}
|
||||
containerOutput = append(containerOutput, output)
|
||||
}
|
||||
} else {
|
||||
// output all containers currently in storage
|
||||
builderMap := make(map[string]struct{})
|
||||
for _, builder := range builders {
|
||||
builderMap[builder.ContainerID] = struct{}{}
|
||||
@@ -126,22 +182,136 @@ func containersCmd(c *cli.Context) error {
|
||||
if err2 != nil {
|
||||
return errors.Wrapf(err2, "error reading list of all containers")
|
||||
}
|
||||
for i, container := range containers {
|
||||
for _, container := range containers {
|
||||
name := ""
|
||||
if len(container.Names) > 0 {
|
||||
name = container.Names[0]
|
||||
}
|
||||
_, ours := builderMap[container.ID]
|
||||
list(i, container.ID, container.ImageID, imageNameForID(container.ImageID), name, ours)
|
||||
builder := ""
|
||||
if ours {
|
||||
builder = " *"
|
||||
}
|
||||
if !matchesCtrFilter(container.ID, name, container.ImageID, imageNameForID(container.ImageID), params) {
|
||||
continue
|
||||
}
|
||||
if opts.json {
|
||||
JSONContainers = append(JSONContainers, jsonContainer{ID: container.ID,
|
||||
Builder: ours,
|
||||
ImageID: container.ImageID,
|
||||
ImageName: imageNameForID(container.ImageID),
|
||||
ContainerName: name})
|
||||
}
|
||||
output := containerOutputParams{
|
||||
ContainerID: container.ID,
|
||||
Builder: builder,
|
||||
ImageID: container.ImageID,
|
||||
ImageName: imageNameForID(container.ImageID),
|
||||
ContainerName: name,
|
||||
}
|
||||
containerOutput = append(containerOutput, output)
|
||||
}
|
||||
}
|
||||
if jsonOut {
|
||||
if opts.json {
|
||||
data, err := json.MarshalIndent(JSONContainers, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("%s\n", data)
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, ctr := range containerOutput {
|
||||
if opts.quiet {
|
||||
fmt.Printf("%-64s\n", ctr.ContainerID)
|
||||
continue
|
||||
}
|
||||
if opts.format != "" {
|
||||
if err := containerOutputUsingTemplate(opts.format, ctr); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
containerOutputUsingFormatString(!opts.noTruncate, ctr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func containerOutputUsingTemplate(format string, params containerOutputParams) error {
|
||||
tmpl, err := template.New("container").Parse(format)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Template parsing error")
|
||||
}
|
||||
|
||||
err = tmpl.Execute(os.Stdout, params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println()
|
||||
return nil
|
||||
}
|
||||
|
||||
func containerOutputUsingFormatString(truncate bool, params containerOutputParams) {
|
||||
if truncate {
|
||||
fmt.Printf("%-12.12s %-8s %-12.12s %-32s %s\n", params.ContainerID, params.Builder, params.ImageID, params.ImageName, params.ContainerName)
|
||||
} else {
|
||||
fmt.Printf("%-64s %-8s %-64s %-32s %s\n", params.ContainerID, params.Builder, params.ImageID, params.ImageName, params.ContainerName)
|
||||
}
|
||||
}
|
||||
|
||||
func containerOutputHeader(truncate bool) {
|
||||
if truncate {
|
||||
fmt.Printf("%-12s %-8s %-12s %-32s %s\n", "CONTAINER ID", "BUILDER", "IMAGE ID", "IMAGE NAME", "CONTAINER NAME")
|
||||
} else {
|
||||
fmt.Printf("%-64s %-8s %-64s %-32s %s\n", "CONTAINER ID", "BUILDER", "IMAGE ID", "IMAGE NAME", "CONTAINER NAME")
|
||||
}
|
||||
}
|
||||
|
||||
func parseCtrFilter(filter string) (*containerFilterParams, error) {
|
||||
params := new(containerFilterParams)
|
||||
filters := strings.Split(filter, ",")
|
||||
for _, param := range filters {
|
||||
pair := strings.SplitN(param, "=", 2)
|
||||
if len(pair) != 2 {
|
||||
return nil, errors.Errorf("incorrect filter value %q, should be of form filter=value", param)
|
||||
}
|
||||
switch strings.TrimSpace(pair[0]) {
|
||||
case "id":
|
||||
params.id = pair[1]
|
||||
case "name":
|
||||
params.name = pair[1]
|
||||
case "ancestor":
|
||||
params.ancestor = pair[1]
|
||||
default:
|
||||
return nil, errors.Errorf("invalid filter %q", pair[0])
|
||||
}
|
||||
}
|
||||
return params, nil
|
||||
}
|
||||
|
||||
func matchesCtrName(ctrName, argName string) bool {
|
||||
return strings.Contains(ctrName, argName)
|
||||
}
|
||||
|
||||
func matchesAncestor(imgName, imgID, argName string) bool {
|
||||
if matchesID(imgID, argName) {
|
||||
return true
|
||||
}
|
||||
return matchesReference(imgName, argName)
|
||||
}
|
||||
|
||||
func matchesCtrFilter(ctrID, ctrName, imgID, imgName string, params *containerFilterParams) bool {
|
||||
if params == nil {
|
||||
return true
|
||||
}
|
||||
if params.id != "" && !matchesID(ctrID, params.id) {
|
||||
return false
|
||||
}
|
||||
if params.name != "" && !matchesCtrName(ctrName, params.name) {
|
||||
return false
|
||||
}
|
||||
if params.ancestor != "" && !matchesAncestor(imgName, imgID, params.ancestor) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ var (
|
||||
cli.StringFlag{
|
||||
Name: "creds",
|
||||
Value: "",
|
||||
Usage: "use `username[:password]` for accessing the registry",
|
||||
Usage: "use `[username[:password]]` for accessing the registry",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "name",
|
||||
@@ -63,7 +63,6 @@ var (
|
||||
)
|
||||
|
||||
func fromCmd(c *cli.Context) error {
|
||||
|
||||
args := c.Args()
|
||||
if len(args) == 0 {
|
||||
return errors.Errorf("an image name (or \"scratch\") must be specified")
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
type jsonImage struct {
|
||||
@@ -51,7 +52,7 @@ var (
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "pretty-print images using a Go template. will override --quiet",
|
||||
Usage: "pretty-print images using a Go template",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "json",
|
||||
@@ -96,6 +97,10 @@ func imagesCmd(c *cli.Context) error {
|
||||
return errors.Wrapf(err, "error reading images")
|
||||
}
|
||||
|
||||
if c.IsSet("quiet") && c.IsSet("format") {
|
||||
return errors.Errorf("quiet and format are mutually exclusive")
|
||||
}
|
||||
|
||||
quiet := c.Bool("quiet")
|
||||
truncate := !c.Bool("no-trunc")
|
||||
digests := c.Bool("digests")
|
||||
@@ -125,8 +130,6 @@ func imagesCmd(c *cli.Context) error {
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing filter")
|
||||
}
|
||||
} else {
|
||||
params = nil
|
||||
}
|
||||
|
||||
if len(images) > 0 && !c.Bool("noheading") && !quiet && !hasTemplate {
|
||||
@@ -178,7 +181,7 @@ func setFilterDate(store storage.Store, images []storage.Image, imgName string)
|
||||
for _, name := range image.Names {
|
||||
if matchesReference(name, imgName) {
|
||||
// Set the date to this image
|
||||
ref, err := is.Transport.ParseStoreReference(store, "@"+image.ID)
|
||||
ref, err := is.Transport.ParseStoreReference(store, image.ID)
|
||||
if err != nil {
|
||||
return time.Time{}, fmt.Errorf("error parsing reference to image %q: %v", image.ID, err)
|
||||
}
|
||||
@@ -290,7 +293,7 @@ func matchesDangling(name string, dangling string) bool {
|
||||
}
|
||||
|
||||
func matchesLabel(image storage.Image, store storage.Store, label string) bool {
|
||||
storeRef, err := is.Transport.ParseStoreReference(store, "@"+image.ID)
|
||||
storeRef, err := is.Transport.ParseStoreReference(store, image.ID)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
@@ -322,19 +325,13 @@ func matchesLabel(image storage.Image, store storage.Store, label string) bool {
|
||||
// Returns true if the image was created since the filter image. Returns
|
||||
// false otherwise
|
||||
func matchesBeforeImage(image storage.Image, name string, params *filterParams) bool {
|
||||
if image.Created.IsZero() || image.Created.Before(params.beforeDate) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return image.Created.IsZero() || image.Created.Before(params.beforeDate)
|
||||
}
|
||||
|
||||
// Returns true if the image was created since the filter image. Returns
|
||||
// false otherwise
|
||||
func matchesSinceImage(image storage.Image, name string, params *filterParams) bool {
|
||||
if image.Created.IsZero() || image.Created.After(params.sinceDate) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return image.Created.IsZero() || image.Created.After(params.sinceDate)
|
||||
}
|
||||
|
||||
func matchesID(imageID, argID string) bool {
|
||||
@@ -376,7 +373,9 @@ func outputUsingTemplate(format string, params imageOutputParams) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println()
|
||||
if terminal.IsTerminal(int(os.Stdout.Fd())) {
|
||||
fmt.Println()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -253,7 +253,7 @@ func TestOutputImagesFormatString(t *testing.T) {
|
||||
output, err := captureOutputWithError(func() error {
|
||||
return outputImages(images[:1], "{{.ID}}", store, nil, "", true, true, false, false)
|
||||
})
|
||||
expectedOutput := fmt.Sprintf("%s", images[0].ID)
|
||||
expectedOutput := images[0].ID
|
||||
if err != nil {
|
||||
t.Error("format string output produces error")
|
||||
} else if strings.TrimSpace(output) != strings.TrimSpace(expectedOutput) {
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/urfave/cli"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -56,6 +57,11 @@ func inspectCmd(c *cli.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
systemContext, err := systemContextFromOptions(c)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error building system context")
|
||||
}
|
||||
|
||||
format := defaultFormat
|
||||
if c.String("format") != "" {
|
||||
format = c.String("format")
|
||||
@@ -76,13 +82,13 @@ func inspectCmd(c *cli.Context) error {
|
||||
if c.IsSet("type") {
|
||||
return errors.Wrapf(err, "error reading build container %q", name)
|
||||
}
|
||||
builder, err = openImage(store, name)
|
||||
builder, err = openImage(systemContext, store, name)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading build object %q", name)
|
||||
}
|
||||
}
|
||||
case inspectTypeImage:
|
||||
builder, err = openImage(store, name)
|
||||
builder, err = openImage(systemContext, store, name)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading image %q", name)
|
||||
}
|
||||
@@ -91,13 +97,19 @@ func inspectCmd(c *cli.Context) error {
|
||||
}
|
||||
|
||||
if c.IsSet("format") {
|
||||
return t.Execute(os.Stdout, builder)
|
||||
if err := t.Execute(os.Stdout, buildah.GetBuildInfo(builder)); err != nil {
|
||||
return err
|
||||
}
|
||||
if terminal.IsTerminal(int(os.Stdout.Fd())) {
|
||||
fmt.Println()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
b, err := json.MarshalIndent(builder, "", " ")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error encoding build container as json")
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
if terminal.IsTerminal(int(os.Stdout.Fd())) {
|
||||
enc.SetEscapeHTML(false)
|
||||
}
|
||||
_, err = fmt.Println(string(b))
|
||||
return err
|
||||
return enc.Encode(builder)
|
||||
}
|
||||
|
||||
@@ -33,6 +33,14 @@ func main() {
|
||||
Name: "debug",
|
||||
Usage: "print debugging information",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "registries-conf",
|
||||
Usage: "path to registries.conf file (not usually used)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "registries-conf-dir",
|
||||
Usage: "path to registries.conf.d directory (not usually used)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "root",
|
||||
Usage: "storage root dir",
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah"
|
||||
"github.com/projectatomic/buildah/util"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
@@ -29,7 +30,7 @@ var (
|
||||
cli.StringFlag{
|
||||
Name: "creds",
|
||||
Value: "",
|
||||
Usage: "use `username[:password]` for accessing the registry",
|
||||
Usage: "use `[username[:password]]` for accessing the registry",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "disable-compression, D",
|
||||
@@ -141,7 +142,10 @@ func pushCmd(c *cli.Context) error {
|
||||
|
||||
err = buildah.Push(src, dest, options)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error pushing image %q to %q", src, destSpec)
|
||||
return util.GetFailureCause(
|
||||
err,
|
||||
errors.Wrapf(err, "error pushing image %q to %q", src, destSpec),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -10,19 +11,35 @@ import (
|
||||
|
||||
var (
|
||||
rmDescription = "Removes one or more working containers, unmounting them if necessary"
|
||||
rmCommand = cli.Command{
|
||||
rmFlags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Usage: "remove all containers",
|
||||
},
|
||||
}
|
||||
rmCommand = cli.Command{
|
||||
Name: "rm",
|
||||
Aliases: []string{"delete"},
|
||||
Usage: "Remove one or more working containers",
|
||||
Description: rmDescription,
|
||||
Action: rmCmd,
|
||||
ArgsUsage: "CONTAINER-NAME-OR-ID [...]",
|
||||
Flags: rmFlags,
|
||||
}
|
||||
)
|
||||
|
||||
// writeError writes `lastError` into `w` if not nil and return the next error `err`
|
||||
func writeError(w io.Writer, err error, lastError error) error {
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(w, lastError)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func rmCmd(c *cli.Context) error {
|
||||
delContainerErrStr := "error removing container"
|
||||
args := c.Args()
|
||||
if len(args) == 0 {
|
||||
if len(args) == 0 && !c.Bool("all") {
|
||||
return errors.Errorf("container ID must be specified")
|
||||
}
|
||||
store, err := getStore(c)
|
||||
@@ -30,28 +47,36 @@ func rmCmd(c *cli.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
var e error
|
||||
for _, name := range args {
|
||||
builder, err := openBuilder(store, name)
|
||||
if e == nil {
|
||||
e = err
|
||||
}
|
||||
var lastError error
|
||||
if c.Bool("all") {
|
||||
builders, err := openBuilders(store)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error reading build container %q: %v\n", name, err)
|
||||
continue
|
||||
return errors.Wrapf(err, "error reading build containers")
|
||||
}
|
||||
|
||||
id := builder.ContainerID
|
||||
err = builder.Delete()
|
||||
if e == nil {
|
||||
e = err
|
||||
for _, builder := range builders {
|
||||
id := builder.ContainerID
|
||||
if err = builder.Delete(); err != nil {
|
||||
lastError = writeError(os.Stderr, errors.Wrapf(err, "%s %q", delContainerErrStr, builder.Container), lastError)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("%s\n", id)
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error removing container %q: %v\n", builder.Container, err)
|
||||
continue
|
||||
} else {
|
||||
for _, name := range args {
|
||||
builder, err := openBuilder(store, name)
|
||||
if err != nil {
|
||||
lastError = writeError(os.Stderr, errors.Wrapf(err, "%s %q", delContainerErrStr, name), lastError)
|
||||
continue
|
||||
}
|
||||
id := builder.ContainerID
|
||||
if err = builder.Delete(); err != nil {
|
||||
lastError = writeError(os.Stderr, errors.Wrapf(err, "%s %q", delContainerErrStr, name), lastError)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("%s\n", id)
|
||||
}
|
||||
fmt.Printf("%s\n", id)
|
||||
|
||||
}
|
||||
|
||||
return e
|
||||
return lastError
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/transports"
|
||||
@@ -16,9 +17,17 @@ import (
|
||||
var (
|
||||
rmiDescription = "removes one or more locally stored images."
|
||||
rmiFlags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Usage: "remove all images",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "prune, p",
|
||||
Usage: "prune dangling images",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "force, f",
|
||||
Usage: "force removal of the image",
|
||||
Usage: "force removal of the image and any containers using the image",
|
||||
},
|
||||
}
|
||||
rmiCommand = cli.Command{
|
||||
@@ -33,11 +42,20 @@ var (
|
||||
|
||||
func rmiCmd(c *cli.Context) error {
|
||||
force := c.Bool("force")
|
||||
removeAll := c.Bool("all")
|
||||
pruneDangling := c.Bool("prune")
|
||||
|
||||
args := c.Args()
|
||||
if len(args) == 0 {
|
||||
if len(args) == 0 && !removeAll && !pruneDangling {
|
||||
return errors.Errorf("image name or ID must be specified")
|
||||
}
|
||||
if len(args) > 0 && removeAll {
|
||||
return errors.Errorf("when using the --all switch, you may not pass any images names or IDs")
|
||||
}
|
||||
if removeAll && pruneDangling {
|
||||
return errors.Errorf("when using the --all switch, you may not use --prune switch")
|
||||
}
|
||||
|
||||
if err := validateFlags(c, rmiFlags); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -47,26 +65,62 @@ func rmiCmd(c *cli.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, id := range args {
|
||||
image, err := getImage(id, store)
|
||||
imagesToDelete := args[:]
|
||||
var lastError error
|
||||
|
||||
if removeAll {
|
||||
imagesToDelete, err = findAllImages(store)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get image %q", id)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if pruneDangling {
|
||||
imagesToDelete, err = findDanglingImages(store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, id := range imagesToDelete {
|
||||
image, err := getImage(id, store)
|
||||
if err != nil || image == nil {
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(os.Stderr, lastError)
|
||||
}
|
||||
if err == nil {
|
||||
err = storage.ErrNotAnImage
|
||||
}
|
||||
lastError = errors.Wrapf(err, "could not get image %q", id)
|
||||
continue
|
||||
}
|
||||
if image != nil {
|
||||
ctrIDs, err := runningContainers(image, store)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error getting running containers for image %q", id)
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(os.Stderr, lastError)
|
||||
}
|
||||
lastError = errors.Wrapf(err, "error getting running containers for image %q", id)
|
||||
continue
|
||||
}
|
||||
if len(ctrIDs) > 0 && len(image.Names) <= 1 {
|
||||
if force {
|
||||
err = removeContainers(ctrIDs, store)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error removing containers %v for image %q", ctrIDs, id)
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(os.Stderr, lastError)
|
||||
}
|
||||
lastError = errors.Wrapf(err, "error removing containers %v for image %q", ctrIDs, id)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
for _, ctrID := range ctrIDs {
|
||||
return fmt.Errorf("Could not remove image %q (must force) - container %q is using its reference image", id, ctrID)
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(os.Stderr, lastError)
|
||||
}
|
||||
lastError = errors.Wrapf(storage.ErrImageUsedByContainer, "Could not remove image %q (must force) - container %q is using its reference image", id, ctrID)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
// If the user supplied an ID, we cannot delete the image if it is referred to by multiple tags
|
||||
@@ -79,7 +133,11 @@ func rmiCmd(c *cli.Context) error {
|
||||
} else {
|
||||
name, err2 := untagImage(id, image, store)
|
||||
if err2 != nil {
|
||||
return errors.Wrapf(err, "error removing tag %q from image %q", id, image.ID)
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(os.Stderr, lastError)
|
||||
}
|
||||
lastError = errors.Wrapf(err2, "error removing tag %q from image %q", id, image.ID)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("untagged: %s\n", name)
|
||||
}
|
||||
@@ -89,13 +147,17 @@ func rmiCmd(c *cli.Context) error {
|
||||
}
|
||||
id, err := removeImage(image, store)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error removing image %q", image.ID)
|
||||
if lastError != nil {
|
||||
fmt.Fprintln(os.Stderr, lastError)
|
||||
}
|
||||
lastError = errors.Wrapf(err, "error removing image %q", image.ID)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("%s\n", id)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return lastError
|
||||
}
|
||||
|
||||
func getImage(id string, store storage.Store) (*storage.Image, error) {
|
||||
@@ -178,7 +240,7 @@ func removeContainers(ctrIDs []string, store storage.Store) error {
|
||||
func properImageRef(id string) (types.ImageReference, error) {
|
||||
var err error
|
||||
if ref, err := alltransports.ParseImageName(id); err == nil {
|
||||
if img, err2 := ref.NewImage(nil); err2 == nil {
|
||||
if img, err2 := ref.NewImageSource(nil); err2 == nil {
|
||||
img.Close()
|
||||
return ref, nil
|
||||
}
|
||||
@@ -192,7 +254,7 @@ func properImageRef(id string) (types.ImageReference, error) {
|
||||
func storageImageRef(store storage.Store, id string) (types.ImageReference, error) {
|
||||
var err error
|
||||
if ref, err := is.Transport.ParseStoreReference(store, id); err == nil {
|
||||
if img, err2 := ref.NewImage(nil); err2 == nil {
|
||||
if img, err2 := ref.NewImageSource(nil); err2 == nil {
|
||||
img.Close()
|
||||
return ref, nil
|
||||
}
|
||||
@@ -210,12 +272,44 @@ func storageImageID(store storage.Store, id string) (types.ImageReference, error
|
||||
if img, err := store.Image(id); err == nil && img != nil {
|
||||
imageID = img.ID
|
||||
}
|
||||
if ref, err := is.Transport.ParseStoreReference(store, "@"+imageID); err == nil {
|
||||
if img, err2 := ref.NewImage(nil); err2 == nil {
|
||||
if ref, err := is.Transport.ParseStoreReference(store, imageID); err == nil {
|
||||
if img, err2 := ref.NewImageSource(nil); err2 == nil {
|
||||
img.Close()
|
||||
return ref, nil
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error confirming presence of storage image reference %q", transports.ImageName(ref))
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error parsing %q as a storage image reference: %v", "@"+id)
|
||||
return nil, errors.Wrapf(err, "error parsing %q as a storage image reference: %v", id)
|
||||
}
|
||||
|
||||
// Returns a list of all existing images
|
||||
func findAllImages(store storage.Store) ([]string, error) {
|
||||
imagesToDelete := []string{}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading images")
|
||||
}
|
||||
for _, image := range images {
|
||||
imagesToDelete = append(imagesToDelete, image.ID)
|
||||
}
|
||||
|
||||
return imagesToDelete, nil
|
||||
}
|
||||
|
||||
// Returns a list of all dangling images
|
||||
func findDanglingImages(store storage.Store) ([]string, error) {
|
||||
imagesToDelete := []string{}
|
||||
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading images")
|
||||
}
|
||||
for _, image := range images {
|
||||
if len(image.Names) == 0 {
|
||||
imagesToDelete = append(imagesToDelete, image.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return imagesToDelete, nil
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@ var (
|
||||
runFlags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "hostname",
|
||||
Usage: "Set the hostname inside of the container",
|
||||
Usage: "set the hostname inside of the container",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "runtime",
|
||||
@@ -28,6 +28,10 @@ var (
|
||||
Name: "runtime-flag",
|
||||
Usage: "add global flags for the container runtime",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "security-opt",
|
||||
Usage: "security Options (default [])",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "tty",
|
||||
Usage: "allocate a pseudo-TTY in the container",
|
||||
@@ -73,10 +77,15 @@ func runCmd(c *cli.Context) error {
|
||||
return errors.Wrapf(err, "error reading build container %q", name)
|
||||
}
|
||||
|
||||
runtimeFlags := []string{}
|
||||
for _, arg := range c.StringSlice("runtime-flag") {
|
||||
runtimeFlags = append(runtimeFlags, "--"+arg)
|
||||
}
|
||||
|
||||
options := buildah.RunOptions{
|
||||
Hostname: c.String("hostname"),
|
||||
Runtime: c.String("runtime"),
|
||||
Args: c.StringSlice("runtime-flag"),
|
||||
Args: runtimeFlags,
|
||||
}
|
||||
|
||||
if c.IsSet("tty") {
|
||||
|
||||
162
commit.go
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
cp "github.com/containers/image/copy"
|
||||
@@ -19,17 +20,6 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (just 1024 zero bytes). This
|
||||
// comes from github.com/docker/distribution/manifest/schema1/config_builder.go by way of
|
||||
// github.com/containers/image/image/docker_schema2.go; there is a non-zero embedded timestamp; we could
|
||||
// zero that, but that would just waste storage space in registries, so let’s use the same values.
|
||||
gzippedEmptyLayer = []byte{
|
||||
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
|
||||
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
|
||||
}
|
||||
)
|
||||
|
||||
// CommitOptions can be used to alter how an image is committed.
|
||||
type CommitOptions struct {
|
||||
// PreferredManifestType is the preferred type of image manifest. The
|
||||
@@ -90,7 +80,7 @@ type PushOptions struct {
|
||||
// almost any other destination has higher expectations.
|
||||
// We assume that "dest" is a reference to a local image (specifically, a containers/image/storage.storageReference),
|
||||
// and will fail if it isn't.
|
||||
func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReference, systemContext *types.SystemContext) error {
|
||||
func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReference, systemContext *types.SystemContext, compression archive.Compression) error {
|
||||
var names []string
|
||||
// Read the target image name.
|
||||
if dest.DockerReference() != nil {
|
||||
@@ -106,10 +96,34 @@ func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReferenc
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error opening image %q for writing", transports.ImageName(dest))
|
||||
}
|
||||
// Write an empty filesystem layer, because the image layer requires at least one.
|
||||
_, err = destImage.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Size: int64(len(gzippedEmptyLayer))})
|
||||
// Look up the container's read-write layer.
|
||||
container, err := b.store.Container(b.ContainerID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error writing dummy layer for image %q", transports.ImageName(dest))
|
||||
return errors.Wrapf(err, "error reading information about working container %q", b.ContainerID)
|
||||
}
|
||||
// Extract the read-write layer's contents, using whatever compression the container image used to
|
||||
// calculate the blob sum in the manifest.
|
||||
switch compression {
|
||||
case archive.Gzip:
|
||||
logrus.Debugf("extracting layer %q with gzip", container.LayerID)
|
||||
case archive.Bzip2:
|
||||
// Until the image specs define a media type for bzip2-compressed layers, even if we know
|
||||
// how to decompress them, we can't try to compress layers with bzip2.
|
||||
return errors.Wrapf(syscall.ENOTSUP, "media type for bzip2-compressed layers is not defined")
|
||||
default:
|
||||
logrus.Debugf("extracting layer %q with unknown compressor(?)", container.LayerID)
|
||||
}
|
||||
diffOptions := &storage.DiffOptions{
|
||||
Compression: &compression,
|
||||
}
|
||||
layerDiff, err := b.store.Diff("", container.LayerID, diffOptions)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading layer %q from source image %q", container.LayerID, transports.ImageName(src))
|
||||
}
|
||||
defer layerDiff.Close()
|
||||
// Write a copy of the layer as a blob, for the new image to reference.
|
||||
if _, err = destImage.PutBlob(layerDiff, types.BlobInfo{Digest: "", Size: -1}); err != nil {
|
||||
return errors.Wrapf(err, "error creating new read-only layer from container %q", b.ContainerID)
|
||||
}
|
||||
// Read the newly-generated configuration blob.
|
||||
config, err := srcImage.ConfigBlob()
|
||||
@@ -125,11 +139,10 @@ func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReferenc
|
||||
Digest: digest.Canonical.FromBytes(config),
|
||||
Size: int64(len(config)),
|
||||
}
|
||||
_, err = destImage.PutBlob(bytes.NewReader(config), configBlobInfo)
|
||||
if err != nil && len(config) > 0 {
|
||||
if _, err = destImage.PutBlob(bytes.NewReader(config), configBlobInfo); err != nil {
|
||||
return errors.Wrapf(err, "error writing image configuration for temporary copy of %q", transports.ImageName(dest))
|
||||
}
|
||||
// Read the newly-generated, mostly fake, manifest.
|
||||
// Read the newly-generated manifest, which already contains a layer entry for the read-write layer.
|
||||
manifest, _, err := srcImage.Manifest()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading new manifest for image %q", transports.ImageName(dest))
|
||||
@@ -148,79 +161,9 @@ func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReferenc
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error closing new image %q", transports.ImageName(dest))
|
||||
}
|
||||
// Locate the new image in the lower-level API. Extract its items.
|
||||
destImg, err := is.Transport.GetStoreImage(b.store, dest)
|
||||
image, err := is.Transport.GetStoreImage(b.store, dest)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error locating new image %q", transports.ImageName(dest))
|
||||
}
|
||||
items, err := b.store.ListImageBigData(destImg.ID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading list of named data for image %q", destImg.ID)
|
||||
}
|
||||
bigdata := make(map[string][]byte)
|
||||
for _, itemName := range items {
|
||||
var data []byte
|
||||
data, err = b.store.ImageBigData(destImg.ID, itemName)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading named data %q for image %q", itemName, destImg.ID)
|
||||
}
|
||||
bigdata[itemName] = data
|
||||
}
|
||||
// Delete the image so that we can recreate it.
|
||||
_, err = b.store.DeleteImage(destImg.ID, true)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error deleting image %q for rewriting", destImg.ID)
|
||||
}
|
||||
// Look up the container's read-write layer.
|
||||
container, err := b.store.Container(b.ContainerID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading information about working container %q", b.ContainerID)
|
||||
}
|
||||
parentLayer := ""
|
||||
// Look up the container's source image's layer, if there is a source image.
|
||||
if container.ImageID != "" {
|
||||
img, err2 := b.store.Image(container.ImageID)
|
||||
if err2 != nil {
|
||||
return errors.Wrapf(err2, "error reading information about working container %q's source image", b.ContainerID)
|
||||
}
|
||||
parentLayer = img.TopLayer
|
||||
}
|
||||
// Extract the read-write layer's contents.
|
||||
layerDiff, err := b.store.Diff(parentLayer, container.LayerID, nil)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading layer %q from source image %q", container.LayerID, transports.ImageName(src))
|
||||
}
|
||||
defer layerDiff.Close()
|
||||
// Write a copy of the layer for the new image to reference.
|
||||
layer, _, err := b.store.PutLayer("", parentLayer, []string{}, "", false, layerDiff)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating new read-only layer from container %q", b.ContainerID)
|
||||
}
|
||||
// Create a low-level image record that uses the new layer, discarding the old metadata.
|
||||
image, err := b.store.CreateImage(destImg.ID, []string{}, layer.ID, "{}", nil)
|
||||
if err != nil {
|
||||
err2 := b.store.DeleteLayer(layer.ID)
|
||||
if err2 != nil {
|
||||
logrus.Debugf("error removing layer %q: %v", layer, err2)
|
||||
}
|
||||
return errors.Wrapf(err, "error creating new low-level image %q", transports.ImageName(dest))
|
||||
}
|
||||
logrus.Debugf("(re-)created image ID %q using layer %q", image.ID, layer.ID)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
_, err2 := b.store.DeleteImage(image.ID, true)
|
||||
if err2 != nil {
|
||||
logrus.Debugf("error removing image %q: %v", image.ID, err2)
|
||||
}
|
||||
}
|
||||
}()
|
||||
// Store the configuration and manifest, which are big data items, along with whatever else is there.
|
||||
for itemName, data := range bigdata {
|
||||
err = b.store.SetImageBigData(image.ID, itemName, data)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error saving data item %q", itemName)
|
||||
}
|
||||
logrus.Debugf("saved data item %q to %q", itemName, image.ID)
|
||||
return errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
|
||||
}
|
||||
// Add the target name(s) to the new image.
|
||||
if len(names) > 0 {
|
||||
@@ -237,7 +180,7 @@ func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReferenc
|
||||
// configuration, to a new image in the specified location, and if we know how,
|
||||
// add any additional tags that were specified.
|
||||
func (b *Builder) Commit(dest types.ImageReference, options CommitOptions) error {
|
||||
policy, err := signature.DefaultPolicy(getSystemContext(options.SignaturePolicyPath))
|
||||
policy, err := signature.DefaultPolicy(getSystemContext(options.SystemContext, options.SignaturePolicyPath))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error obtaining default signature policy")
|
||||
}
|
||||
@@ -247,13 +190,13 @@ func (b *Builder) Commit(dest types.ImageReference, options CommitOptions) error
|
||||
}
|
||||
defer func() {
|
||||
if err2 := policyContext.Destroy(); err2 != nil {
|
||||
logrus.Debugf("error destroying signature polcy context: %v", err2)
|
||||
logrus.Debugf("error destroying signature policy context: %v", err2)
|
||||
}
|
||||
}()
|
||||
// Check if we're keeping everything in local storage. If so, we can take certain shortcuts.
|
||||
_, destIsStorage := dest.Transport().(is.StoreTransport)
|
||||
exporting := !destIsStorage
|
||||
src, err := b.makeContainerImageRef(options.PreferredManifestType, exporting, options.Compression, options.HistoryTimestamp)
|
||||
src, err := b.makeImageRef(options.PreferredManifestType, exporting, options.Compression, options.HistoryTimestamp)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error computing layer digests and building metadata")
|
||||
}
|
||||
@@ -265,7 +208,7 @@ func (b *Builder) Commit(dest types.ImageReference, options CommitOptions) error
|
||||
}
|
||||
} else {
|
||||
// Copy only the most recent layer, the configuration, and the manifest.
|
||||
err = b.shallowCopy(dest, src, getSystemContext(options.SignaturePolicyPath))
|
||||
err = b.shallowCopy(dest, src, getSystemContext(options.SystemContext, options.SignaturePolicyPath), options.Compression)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error copying layer and metadata")
|
||||
}
|
||||
@@ -291,7 +234,7 @@ func (b *Builder) Commit(dest types.ImageReference, options CommitOptions) error
|
||||
|
||||
// Push copies the contents of the image to a new location.
|
||||
func Push(image string, dest types.ImageReference, options PushOptions) error {
|
||||
systemContext := getSystemContext(options.SignaturePolicyPath)
|
||||
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
|
||||
policy, err := signature.DefaultPolicy(systemContext)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error obtaining default signature policy")
|
||||
@@ -300,36 +243,11 @@ func Push(image string, dest types.ImageReference, options PushOptions) error {
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating new signature policy context")
|
||||
}
|
||||
defer func() {
|
||||
if err2 := policyContext.Destroy(); err2 != nil {
|
||||
logrus.Debugf("error destroying signature polcy context: %v", err2)
|
||||
}
|
||||
}()
|
||||
importOptions := ImportFromImageOptions{
|
||||
Image: image,
|
||||
SignaturePolicyPath: options.SignaturePolicyPath,
|
||||
}
|
||||
builder, err := importBuilderFromImage(options.Store, importOptions)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error importing builder information from image")
|
||||
}
|
||||
// Look up the image name and its layer.
|
||||
ref, err := is.Transport.ParseStoreReference(options.Store, image)
|
||||
// Look up the image.
|
||||
src, err := is.Transport.ParseStoreReference(options.Store, image)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing reference to image %q", image)
|
||||
}
|
||||
img, err := is.Transport.GetStoreImage(options.Store, ref)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error locating image %q", image)
|
||||
}
|
||||
// Give the image we're producing the same ancestors as its source image.
|
||||
builder.FromImage = builder.Docker.ContainerConfig.Image
|
||||
builder.FromImageID = string(builder.Docker.Parent)
|
||||
// Prep the layers and manifest for export.
|
||||
src, err := builder.makeImageImageRef(options.Compression, img.Names, img.TopLayer, nil)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error recomputing layer digests and building metadata")
|
||||
}
|
||||
// Copy everything.
|
||||
err = cp.Image(policyContext, dest, src, getCopyOptions(options.ReportWriter, nil, options.SystemContext, options.ManifestType))
|
||||
if err != nil {
|
||||
|
||||
@@ -16,8 +16,11 @@ func getCopyOptions(reportWriter io.Writer, sourceSystemContext *types.SystemCon
|
||||
}
|
||||
}
|
||||
|
||||
func getSystemContext(signaturePolicyPath string) *types.SystemContext {
|
||||
func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string) *types.SystemContext {
|
||||
sc := &types.SystemContext{}
|
||||
if defaults != nil {
|
||||
*sc = *defaults
|
||||
}
|
||||
if signaturePolicyPath != "" {
|
||||
sc.SignaturePolicyPath = signaturePolicyPath
|
||||
}
|
||||
|
||||
38
config.go
@@ -2,7 +2,6 @@ package buildah
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
@@ -139,23 +138,30 @@ func makeDockerV2S1Image(manifest docker.V2S1Manifest) (docker.V2Image, error) {
|
||||
}
|
||||
// Build a filesystem history.
|
||||
history := []docker.V2S2History{}
|
||||
lastID := ""
|
||||
for i := range manifest.History {
|
||||
h := docker.V2S2History{
|
||||
Created: time.Now().UTC(),
|
||||
Author: "",
|
||||
CreatedBy: "",
|
||||
Comment: "",
|
||||
EmptyLayer: false,
|
||||
}
|
||||
// Decode the compatibility field.
|
||||
dcompat := docker.V1Compatibility{}
|
||||
if err2 := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), &dcompat); err2 == nil {
|
||||
h.Created = dcompat.Created.UTC()
|
||||
h.Author = dcompat.Author
|
||||
h.Comment = dcompat.Comment
|
||||
if len(dcompat.ContainerConfig.Cmd) > 0 {
|
||||
h.CreatedBy = fmt.Sprintf("%v", dcompat.ContainerConfig.Cmd)
|
||||
}
|
||||
h.EmptyLayer = dcompat.ThrowAway
|
||||
if err = json.Unmarshal([]byte(manifest.History[i].V1Compatibility), &dcompat); err != nil {
|
||||
return docker.V2Image{}, errors.Errorf("error parsing image compatibility data (%q) from history", manifest.History[i].V1Compatibility)
|
||||
}
|
||||
// Skip this history item if it shares the ID of the last one
|
||||
// that we saw, since the image library will do the same.
|
||||
if i > 0 && dcompat.ID == lastID {
|
||||
continue
|
||||
}
|
||||
lastID = dcompat.ID
|
||||
// Construct a new history item using the recovered information.
|
||||
createdBy := ""
|
||||
if len(dcompat.ContainerConfig.Cmd) > 0 {
|
||||
createdBy = strings.Join(dcompat.ContainerConfig.Cmd, " ")
|
||||
}
|
||||
h := docker.V2S2History{
|
||||
Created: dcompat.Created.UTC(),
|
||||
Author: dcompat.Author,
|
||||
CreatedBy: createdBy,
|
||||
Comment: dcompat.Comment,
|
||||
EmptyLayer: dcompat.ThrowAway,
|
||||
}
|
||||
// Prepend this layer to the list, because a v2s1 format manifest's list is in reverse order
|
||||
// compared to v2s2, which lists earlier layers before later ones.
|
||||
|
||||
@@ -210,6 +210,10 @@ return 1
|
||||
|
||||
_buildah_rmi() {
|
||||
local boolean_options="
|
||||
--all
|
||||
-a
|
||||
--force
|
||||
-f
|
||||
--help
|
||||
-h
|
||||
"
|
||||
@@ -226,6 +230,8 @@ return 1
|
||||
|
||||
_buildah_rm() {
|
||||
local boolean_options="
|
||||
--all
|
||||
-a
|
||||
--help
|
||||
-h
|
||||
"
|
||||
@@ -296,6 +302,7 @@ return 1
|
||||
"
|
||||
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--cert-dir
|
||||
--creds
|
||||
--signature-policy
|
||||
@@ -346,15 +353,19 @@ return 1
|
||||
|
||||
local options_with_args="
|
||||
--authfile
|
||||
--signature-policy
|
||||
--build-arg
|
||||
--cert-dir
|
||||
--creds
|
||||
-f
|
||||
--file
|
||||
--format
|
||||
--label
|
||||
--runtime
|
||||
--runtime-flag
|
||||
--tag
|
||||
--security-opt
|
||||
--signature-policy
|
||||
-t
|
||||
--file
|
||||
-f
|
||||
--build-arg
|
||||
--format
|
||||
--tag
|
||||
"
|
||||
|
||||
local all_options="$options_with_args $boolean_options"
|
||||
@@ -390,6 +401,7 @@ return 1
|
||||
--hostname
|
||||
--runtime
|
||||
--runtime-flag
|
||||
--security-opt
|
||||
--volume
|
||||
-v
|
||||
"
|
||||
@@ -554,6 +566,9 @@ return 1
|
||||
"
|
||||
|
||||
local options_with_args="
|
||||
--filter
|
||||
-f
|
||||
--format
|
||||
"
|
||||
|
||||
local all_options="$options_with_args $boolean_options"
|
||||
|
||||
@@ -25,7 +25,7 @@
|
||||
%global shortcommit %(c=%{commit}; echo ${c:0:7})
|
||||
|
||||
Name: buildah
|
||||
Version: 0.7
|
||||
Version: 0.11
|
||||
Release: 1.git%{shortcommit}%{?dist}
|
||||
Summary: A command line tool used to create OCI Images
|
||||
License: ASL 2.0
|
||||
@@ -43,6 +43,7 @@ BuildRequires: btrfs-progs-devel
|
||||
BuildRequires: libassuan-devel
|
||||
BuildRequires: glib2-devel
|
||||
BuildRequires: ostree-devel
|
||||
BuildRequires: make
|
||||
Requires: runc >= 1.0.0-6
|
||||
Requires: container-selinux
|
||||
Requires: skopeo-containers
|
||||
@@ -88,6 +89,52 @@ make DESTDIR=%{buildroot} PREFIX=%{_prefix} install install.completions
|
||||
%{_datadir}/bash-completion/completions/*
|
||||
|
||||
%changelog
|
||||
* Mon Feb 12 2018 Dan Walsh <dwalsh@redhat.com> 0.12-1
|
||||
- Added handling for simpler error messages for Unknown Dockerfile instructions.
|
||||
- Change default certs directory to /etc/containers/certs.dir
|
||||
- Vendor in latest containers/image
|
||||
- Vendor in latest containers/storage
|
||||
- build-using-dockerfile: set the 'author' field for MAINTAINER
|
||||
- Return exit code 1 when buildah-rmi fails
|
||||
- Trim the image reference to just its name before calling getImageName
|
||||
- Touch up rmi -f usage statement
|
||||
- Add --format and --filter to buildah containers
|
||||
- Add --prune,-p option to rmi command
|
||||
- Add authfile param to commit
|
||||
- Fix --runtime-flag for buildah run and bud
|
||||
- format should override quiet for images
|
||||
- Allow all auth params to work with bud
|
||||
- Do not overwrite directory permissions on --chown
|
||||
- Unescape HTML characters output into the terminal
|
||||
- Fix: setting the container name to the image
|
||||
- Prompt for un/pwd if not supplied with --creds
|
||||
- Make bud be really quiet
|
||||
- Return a better error message when failed to resolve an image
|
||||
- Update auth tests and fix bud man page
|
||||
|
||||
* Tue Jan 16 2018 Dan Walsh <dwalsh@redhat.com> 0.11-1
|
||||
- Add --all to remove containers
|
||||
- Add --all functionality to rmi
|
||||
- Show ctrid when doing rm -all
|
||||
- Ignore sequential duplicate layers when reading v2s1
|
||||
- Lots of minor bug fixes
|
||||
- Vendor in latest containers/image and containers/storage
|
||||
|
||||
* Sat Dec 23 2017 Dan Walsh <dwalsh@redhat.com> 0.10-1
|
||||
- Display Config and Manifest as strings
|
||||
- Bump containers/image
|
||||
- Use configured registries to resolve image names
|
||||
- Update to work with newer image library
|
||||
- Add --chown option to add/copy commands
|
||||
|
||||
* Sat Dec 2 2017 Dan Walsh <dwalsh@redhat.com> 0.9-1
|
||||
- Allow push to use the image id
|
||||
- Make sure builtin volumes have the correct label
|
||||
|
||||
* Thu Nov 16 2017 Dan Walsh <dwalsh@redhat.com> 0.8-1
|
||||
- Buildah bud was failing on SELinux machines, this fixes this
|
||||
- Block access to certain kernel file systems inside of the container
|
||||
|
||||
* Thu Nov 16 2017 Dan Walsh <dwalsh@redhat.com> 0.7-1
|
||||
- Ignore errors when trying to read containers buildah.json for loading SELinux reservations
|
||||
- Use credentials from kpod login for buildah
|
||||
@@ -98,6 +145,10 @@ make DESTDIR=%{buildroot} PREFIX=%{_prefix} install install.completions
|
||||
- Bump github.com/vbatts/tar-split
|
||||
- Set option.terminal appropriately in run
|
||||
|
||||
* Wed Nov 08 2017 Dan Walsh <dwalsh@redhat.com> 0.5-2
|
||||
- Bump github.com/vbatts/tar-split
|
||||
- Fixes CVE That could allow a container image to cause a DOS
|
||||
|
||||
* Tue Nov 07 2017 Dan Walsh <dwalsh@redhat.com> 0.5-1
|
||||
- Add secrets patch to buildah
|
||||
- Add proper SELinux labeling to buildah run
|
||||
@@ -128,10 +179,19 @@ make DESTDIR=%{buildroot} PREFIX=%{_prefix} install install.completions
|
||||
- Turn on --enable-gc when running gometalinter
|
||||
- rmi: handle truncated image IDs
|
||||
|
||||
* Thu Jul 20 2017 Dan Walsh <dwalsh@redhat.com> 0.3.0-1
|
||||
* Tue Aug 15 2017 Josh Boyer <jwboyer@redhat.com> - 0.3-5.gitb9b2a8a
|
||||
- Build for s390x as well
|
||||
|
||||
* Wed Aug 02 2017 Fedora Release Engineering <releng@fedoraproject.org> - 0.3-4.gitb9b2a8a
|
||||
- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild
|
||||
|
||||
* Wed Jul 26 2017 Fedora Release Engineering <releng@fedoraproject.org> - 0.3-3.gitb9b2a8a
|
||||
- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild
|
||||
|
||||
* Thu Jul 20 2017 Dan Walsh <dwalsh@redhat.com> 0.3-2.gitb9b2a8a7e
|
||||
- Bump for inclusion of OCI 1.0 Runtime and Image Spec
|
||||
|
||||
* Tue Jul 18 2017 Dan Walsh <dwalsh@redhat.com> 0.2.0-1
|
||||
* Tue Jul 18 2017 Dan Walsh <dwalsh@redhat.com> 0.2.0-1.gitac2aad6
|
||||
- buildah run: Add support for -- ending options parsing
|
||||
- buildah Add/Copy support for glob syntax
|
||||
- buildah commit: Add flag to remove containers on commit
|
||||
@@ -148,7 +208,11 @@ make DESTDIR=%{buildroot} PREFIX=%{_prefix} install install.completions
|
||||
- buildah version: add command
|
||||
- buildah run: Handle run without an explicit command correctly
|
||||
- Ensure volume points get created, and with perms
|
||||
- buildah containers: Add a -a/--all option
|
||||
- buildah containers: Add a -a/--all option
|
||||
|
||||
* Wed Jun 14 2017 Dan Walsh <dwalsh@redhat.com> 0.1.0-2.git597d2ab9
|
||||
- Release Candidate 1
|
||||
- All features have now been implemented.
|
||||
|
||||
* Fri Apr 14 2017 Dan Walsh <dwalsh@redhat.com> 0.0.1-1.git7a0a5333
|
||||
- First package for Fedora
|
||||
|
||||
@@ -13,10 +13,18 @@ appears to be an archive, its contents are extracted and added instead of the
|
||||
archive file itself. If a local directory is specified as a source, its
|
||||
*contents* are copied to the destination.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--chown** *owner*:*group*
|
||||
|
||||
Sets the user and group ownership of the destination content.
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah add containerID '/myapp/app.conf' '/myapp/app.conf'
|
||||
|
||||
buildah add --chown myuser:mygroup containerID '/myapp/app.conf' '/myapp/app.conf'
|
||||
|
||||
buildah add containerID '/home/myuser/myproject.go'
|
||||
|
||||
buildah add containerID '/home/myuser/myfiles.tar' '/tmp'
|
||||
|
||||
@@ -13,10 +13,9 @@ build context directory. The build context directory can be specified as the
|
||||
to a temporary location.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--authfile** *path*
|
||||
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `kpod login`.
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--build-arg** *arg=value*
|
||||
@@ -26,6 +25,17 @@ instructions read from the Dockerfiles in the same way that environment
|
||||
variables are, but which will not be added to environment variable list in the
|
||||
resulting image's configuration.
|
||||
|
||||
**--cert-dir** *path*
|
||||
|
||||
Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry.
|
||||
Default certificates directory is _/etc/containers/certs.d_.
|
||||
|
||||
**--creds** *creds*
|
||||
|
||||
The [username[:password]] to use to authenticate with the registry if required.
|
||||
If one or both values are not supplied, a command line prompt will appear and the
|
||||
value can be entered. The password is entered without echo.
|
||||
|
||||
**-f, --file** *Dockerfile*
|
||||
|
||||
Specifies a Dockerfile which contains instructions for building the image,
|
||||
@@ -53,7 +63,7 @@ Defaults to *true*.
|
||||
|
||||
Pull the image even if a version of the image is already present.
|
||||
|
||||
**--quiet**
|
||||
**-q, --quiet**
|
||||
|
||||
Suppress output messages which indicate which instruction is being processed,
|
||||
and of progress when pulling images from a registry, and when writing the
|
||||
@@ -66,7 +76,11 @@ commands specified by the **RUN** instruction.
|
||||
|
||||
**--runtime-flag** *flag*
|
||||
|
||||
Adds global flags for the container runtime.
|
||||
Adds global flags for the container runtime. To list the supported flags, please
|
||||
consult manpages of your selected container runtime (`runc` is the default
|
||||
runtime, the manpage to consult is `runc(8)`).
|
||||
Note: Do not pass the leading `--` to the flag. To pass the runc flag `--log-format json`
|
||||
to buildah bud, the option given would be `--runtime-flag log-format=json`.
|
||||
|
||||
**--signature-policy** *signaturepolicy*
|
||||
|
||||
@@ -97,5 +111,11 @@ buildah bud --tls-verify=true -t imageName -f Dockerfile.simple
|
||||
|
||||
buildah bud --tls-verify=false -t imageName .
|
||||
|
||||
buildah bud --runtime-flag log-format=json .
|
||||
|
||||
buildah bud --runtime-flag debug .
|
||||
|
||||
buildah bud --authfile /tmp/auths/myauths.json --cert-dir ~/auth --tls-verify=true --creds=username:password -t imageName -f Dockerfile.simple
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1), kpod-login(1), docker-login(1)
|
||||
buildah(1), podman-login(1), docker-login(1)
|
||||
|
||||
@@ -13,13 +13,21 @@ specified, an ID is assigned, but no name is assigned to the image.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--authfile** *path*
|
||||
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--cert-dir** *path*
|
||||
|
||||
Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry
|
||||
Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry.
|
||||
Default certificates directory is _/etc/containers/certs.d_.
|
||||
|
||||
**--creds** *creds*
|
||||
|
||||
The username[:password] to use to authenticate with the registry if required.
|
||||
The [username[:password]] to use to authenticate with the registry if required.
|
||||
If one or both values are not supplied, a command line prompt will appear and the
|
||||
value can be entered. The password is entered without echo.
|
||||
|
||||
**--disable-compression, -D**
|
||||
|
||||
@@ -71,5 +79,8 @@ This example commits the container to the image on the local registry while turn
|
||||
This example commits the container to the image on the local registry using credentials and certificates for authentication.
|
||||
`buildah commit --cert-dir ~/auth --tls-verify=true --creds=username:password containerID docker://localhost:5000/imageId`
|
||||
|
||||
This example commits the container to the image on the local registry using credentials from the /tmp/auths/myauths.json file and certificates for authentication.
|
||||
`buildah commit --authfile /tmp/auths/myauths.json --cert-dir ~/auth --tls-verify=true --creds=username:password containerID docker://localhost:5000/imageId`
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1)
|
||||
|
||||
@@ -15,7 +15,34 @@ IDs, and the names and IDs of the images from which they were initialized.
|
||||
**--all, -a**
|
||||
|
||||
List information about all containers, including those which were not created
|
||||
by and are not being used by Buildah.
|
||||
by and are not being used by Buildah. Containers created by Buildah are
|
||||
denoted with an '*' in the 'BUILDER' column.
|
||||
|
||||
**--filter, -f**
|
||||
|
||||
Filter output based on conditions provided.
|
||||
|
||||
Valid filters are listed below:
|
||||
|
||||
| **Filter** | **Description** |
|
||||
| --------------- | ------------------------------------------------------------------- |
|
||||
| id | [ID] Container's ID |
|
||||
| name | [Name] Container's name |
|
||||
| ancestor | [ImageName] Image or descendant used to create container |
|
||||
|
||||
**--format**
|
||||
|
||||
Pretty-print containers using a Go template.
|
||||
|
||||
Valid placeholders for the Go template are listed below:
|
||||
|
||||
| **Placeholder** | **Description** |
|
||||
| --------------- | -----------------------------------------|
|
||||
| .ContainerID | Container ID |
|
||||
| .Builder | Whether container was created by buildah |
|
||||
| .ImageID | Image ID |
|
||||
| .ImageName | Image name |
|
||||
| .ContainerName | Container name |
|
||||
|
||||
**--json**
|
||||
|
||||
@@ -36,12 +63,55 @@ Displays only the container IDs.
|
||||
## EXAMPLE
|
||||
|
||||
buildah containers
|
||||
```
|
||||
CONTAINER ID BUILDER IMAGE ID IMAGE NAME CONTAINER NAME
|
||||
29bdb522fc62 * 3fd9065eaf02 docker.io/library/alpine:latest alpine-working-container
|
||||
c6b04237ac8e * f9b6f7f7b9d3 docker.io/library/busybox:latest busybox-working-container
|
||||
```
|
||||
|
||||
buildah containers --quiet
|
||||
```
|
||||
29bdb522fc62d43fca0c1a0f11cfc6dfcfed169cf6cf25f928ebca1a612ff5b0
|
||||
c6b04237ac8e9d435ec9cf0e7eda91e302f2db9ef908418522c2d666352281eb
|
||||
```
|
||||
|
||||
buildah containers -q --noheading --notruncate
|
||||
```
|
||||
29bdb522fc62d43fca0c1a0f11cfc6dfcfed169cf6cf25f928ebca1a612ff5b0
|
||||
c6b04237ac8e9d435ec9cf0e7eda91e302f2db9ef908418522c2d666352281eb
|
||||
```
|
||||
|
||||
buildah containers --json
|
||||
```
|
||||
[
|
||||
{
|
||||
"id": "29bdb522fc62d43fca0c1a0f11cfc6dfcfed169cf6cf25f928ebca1a612ff5b0",
|
||||
"builder": true,
|
||||
"imageid": "3fd9065eaf02feaf94d68376da52541925650b81698c53c6824d92ff63f98353",
|
||||
"imagename": "docker.io/library/alpine:latest",
|
||||
"containername": "alpine-working-container"
|
||||
},
|
||||
{
|
||||
"id": "c6b04237ac8e9d435ec9cf0e7eda91e302f2db9ef908418522c2d666352281eb",
|
||||
"builder": true,
|
||||
"imageid": "f9b6f7f7b9d34113f66e16a9da3e921a580937aec98da344b852ca540aaa2242",
|
||||
"imagename": "docker.io/library/busybox:latest",
|
||||
"containername": "busybox-working-container"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
buildah containers --format "{{.ContainerID}} {{.ContainerName}}"
|
||||
```
|
||||
3fbeaa87e583ee7a3e6787b2d3af961ef21946a0c01a08938e4f52d53cce4c04 myalpine-working-container
|
||||
fbfd3505376ee639c3ed50f9d32b78445cd59198a1dfcacf2e7958cda2516d5c ubuntu-working-container
|
||||
```
|
||||
|
||||
buildah containers --filter ancestor=ubuntu
|
||||
```
|
||||
CONTAINER ID BUILDER IMAGE ID IMAGE NAME CONTAINER NAME
|
||||
fbfd3505376e * 0ff04b2e7b63 docker.io/library/ubuntu:latest ubuntu-working-container
|
||||
```
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1)
|
||||
|
||||
@@ -11,10 +11,18 @@ Copies the contents of a file, URL, or a directory to a container's working
|
||||
directory or a specified location in the container. If a local directory is
|
||||
specified as a source, its *contents* are copied to the destination.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--chown** *owner*:*group*
|
||||
|
||||
Sets the user and group ownership of the destination content.
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah copy containerID '/myapp/app.conf' '/myapp/app.conf'
|
||||
|
||||
buildah copy --chown myuser:mygroup containerID '/myapp/app.conf' '/myapp/app.conf'
|
||||
|
||||
buildah copy containerID '/home/myuser/myproject.go'
|
||||
|
||||
buildah copy containerID '/home/myuser/myfiles.tar' '/tmp'
|
||||
|
||||
@@ -17,7 +17,7 @@ Multiple transports are supported:
|
||||
An existing local directory _path_ retrieving the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.
|
||||
|
||||
**docker://**_docker-reference_ (Default)
|
||||
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(kpod login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
|
||||
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(podman login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
|
||||
|
||||
**docker-archive:**_path_
|
||||
An image is retrieved as a `docker load` formatted file.
|
||||
@@ -38,16 +38,19 @@ The container ID of the container that was created. On error, -1 is returned an
|
||||
|
||||
**--authfile** *path*
|
||||
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `kpod login`.
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--cert-dir** *path*
|
||||
|
||||
Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry
|
||||
Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry.
|
||||
Default certificates directory is _/etc/containers/certs.d_.
|
||||
|
||||
**--creds** *creds*
|
||||
|
||||
The username[:password] to use to authenticate with the registry if required.
|
||||
The [username[:password]] to use to authenticate with the registry if required.
|
||||
If one or both values are not supplied, a command line prompt will appear and the
|
||||
value can be entered. The password is entered without echo.
|
||||
|
||||
**--name** *name*
|
||||
|
||||
@@ -94,4 +97,4 @@ buildah from myregistry/myrepository/imagename:imagetag --creds=myusername:mypas
|
||||
buildah from myregistry/myrepository/imagename:imagetag --authfile=/tmp/auths/myauths.json
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1), kpod-login(1), docker-login(1)
|
||||
buildah(1), podman-login(1), docker-login(1)
|
||||
|
||||
@@ -22,7 +22,7 @@ keywords are 'dangling', 'label', 'before' and 'since'.
|
||||
|
||||
**--format="TEMPLATE"**
|
||||
|
||||
Pretty-print images using a Go template. Will override --quiet
|
||||
Pretty-print images using a Go template.
|
||||
|
||||
**--json**
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ Image stored in local container/storage
|
||||
An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.
|
||||
|
||||
**docker://**_docker-reference_
|
||||
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(kpod login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
|
||||
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(podman login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
|
||||
|
||||
**docker-archive:**_path_[**:**_docker-reference_]
|
||||
An image is stored in the `docker save` formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest.
|
||||
@@ -42,16 +42,19 @@ Image stored in local container/storage
|
||||
|
||||
**--authfile** *path*
|
||||
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `kpod login`.
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--cert-dir** *path*
|
||||
|
||||
Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry
|
||||
Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry.
|
||||
Default certificates directory is _/etc/containers/certs.d_.
|
||||
|
||||
**--creds** *creds*
|
||||
|
||||
The username[:password] to use to authenticate with the registry if required.
|
||||
The [username[:password]] to use to authenticate with the registry if required.
|
||||
If one or both values are not supplied, a command line prompt will appear and the
|
||||
value can be entered. The password is entered without echo.
|
||||
|
||||
**--disable-compression, -D**
|
||||
|
||||
@@ -104,4 +107,4 @@ This example extracts the imageID image and puts it into the registry on the loc
|
||||
`# buildah push --cert-dir ~/auth --tls-verify=true --creds=username:password imageID docker://localhost:5000/my-imageID`
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1), kpod-login(1), docker-login(1)
|
||||
buildah(1), podman-login(1), docker-login(1)
|
||||
|
||||
@@ -9,11 +9,19 @@ buildah rm - Removes one or more working containers.
|
||||
## DESCRIPTION
|
||||
Removes one or more working containers, unmounting them if necessary.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--all, -a**
|
||||
|
||||
All Buildah containers will be removed. Buildah containers are denoted with an '*' in the 'BUILDER' column listed by the command 'buildah containers'.
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah rm containerID
|
||||
|
||||
buildah rm containerID1 containerID2 containerID3
|
||||
|
||||
buildah rm --all
|
||||
|
||||
## SEE ALSO
|
||||
buildah(1)
|
||||
|
||||
@@ -9,16 +9,35 @@ buildah rmi - Removes one or more images.
|
||||
## DESCRIPTION
|
||||
Removes one or more locally stored images.
|
||||
|
||||
## LIMITATIONS
|
||||
If the image was pushed to a directory path using the 'dir:' transport
|
||||
the rmi command can not remove the image. Instead standard file system
|
||||
commands should be used.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--all, -a**
|
||||
|
||||
All local images will be removed from the system that do not have containers using the image as a reference image.
|
||||
|
||||
**--prune, -p**
|
||||
|
||||
All local images will be removed from the system that do not have a tag and do not have a child image pointing to them.
|
||||
|
||||
**--force, -f**
|
||||
|
||||
Executing this command will stop all containers that are using the image and remove them from the system
|
||||
This option will cause Buildah to remove all containers that are using the image before removing the image from the system.
|
||||
|
||||
## EXAMPLE
|
||||
|
||||
buildah rmi imageID
|
||||
|
||||
buildah rmi --all
|
||||
|
||||
buildah rmi --all --force
|
||||
|
||||
buildah rmi --prune
|
||||
|
||||
buildah rmi --force imageID
|
||||
|
||||
buildah rmi imageID1 imageID2 imageID3
|
||||
|
||||
@@ -14,7 +14,6 @@ the *buildah config* command. If you execute *buildah run* and expect an
|
||||
interactive shell, you need to specify the --tty flag.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--hostname**
|
||||
Set the hostname inside of the running container.
|
||||
|
||||
@@ -26,7 +25,9 @@ The *path* to an alternate OCI-compatible runtime.
|
||||
|
||||
Adds global flags for the container runtime. To list the supported flags, please
|
||||
consult manpages of your selected container runtime (`runc` is the default
|
||||
runtime, the manpage to consult is `runc(8)`)
|
||||
runtime, the manpage to consult is `runc(8)`).
|
||||
Note: Do not pass the leading `--` to the flag. To pass the runc flag `--log-format json`
|
||||
to buildah run, the option given would be `--runtime-flag log-format=json`.
|
||||
|
||||
**--tty**
|
||||
|
||||
@@ -49,7 +50,9 @@ buildah run containerID -- ps -auxw
|
||||
|
||||
buildah run containerID --hostname myhost -- ps -auxw
|
||||
|
||||
buildah run containerID --runtime-flag --no-new-keyring -- ps -auxw
|
||||
buildah run --runtime-flag log-format=json containerID /bin/bash
|
||||
|
||||
buildah run --runtime-flag debug containerID /bin/bash
|
||||
|
||||
buildah run --tty containerID /bin/bash
|
||||
|
||||
|
||||
@@ -16,6 +16,8 @@ The Buildah package provides a command line tool which can be used to:
|
||||
* Use the updated contents of a container's root filesystem as a filesystem layer to create a new image.
|
||||
* Delete a working container or an image.
|
||||
|
||||
This tool needs to be run as the root user.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--debug**
|
||||
@@ -24,12 +26,28 @@ Print debugging information
|
||||
|
||||
**--default-mounts-file**
|
||||
|
||||
path to default mounts file (default path: "/usr/share/containers/mounts.conf")
|
||||
Path to default mounts file (default path: "/usr/share/containers/mounts.conf")
|
||||
|
||||
**--help, -h**
|
||||
|
||||
Show help
|
||||
|
||||
**--registries-conf** *path*
|
||||
|
||||
Pathname of the configuration file which specifies which registries should be
|
||||
consulted when completing image names which do not include a registry or domain
|
||||
portion. It is not recommended that this option be used, as the default
|
||||
behavior of using the system-wide configuration
|
||||
(*/etc/containers/registries.conf*) is most often preferred.
|
||||
|
||||
**--registries-conf-dir** *path*
|
||||
|
||||
Pathname of the directory which contains configuration snippets which specify
|
||||
registries which should be consulted when completing image names which do not
|
||||
include a registry or domain portion. It is not recommended that this option
|
||||
be used, as the default behavior of using the system-wide configuration
|
||||
(*/etc/containers/registries.d*) is most often preferred.
|
||||
|
||||
**--root** **value**
|
||||
|
||||
Storage root dir (default: "/var/lib/containers/storage")
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||

|
||||
|
||||
# Buildah Tutorial 1
|
||||
## Building OCI container images
|
||||
|
||||
|
||||
134
docs/tutorials/02-registries-repositories.md
Normal file
@@ -0,0 +1,134 @@
|
||||

|
||||
|
||||
# Buildah Tutorial 2
|
||||
## Using Buildah with container registries
|
||||
|
||||
The purpose of this tutorial is to demonstrate how Buildah can be used to move OCI compliant images in and out of private or public registries.
|
||||
|
||||
In the [first tutorial](https://github.com/projectatomic/buildah/blob/master/docs/tutorials/01-intro.md) we built an image from scratch that we called `fedora-bashecho` and we pushed it to a local Docker repository using the `docker-daemon` protocol. We are going to use the same image to push to a private Docker registry.
|
||||
|
||||
First we must pull down a registry. As a shortcut we will save the container name that is returned from the `buildah from` command, into a bash variable called `registry`. This is just like we did in Tutorial 1:
|
||||
|
||||
# registry=$(buildah from registry)
|
||||
|
||||
It is worth pointing out that the `from` command can also use other protocols beyond the default (and implicitly assumed) order that first looks in local containers-storage (containers-storage:) and then looks in the Docker hub (docker:). For example, if you already had a registry container image in a local Docker registry then you could use the following:
|
||||
|
||||
# registry=$(buildah from docker-daemon:registry:latest)
|
||||
|
||||
Then we need to start the registry. You should start the registry in a separate shell and leave it running there:
|
||||
|
||||
# buildah run $registry
|
||||
|
||||
If you would like to see more details as to what is going on inside the registry, especially if you are having problems with the registry, you can run the registry container in debug mode as follows:
|
||||
|
||||
# buildah --debug run $registry
|
||||
|
||||
You can use `--debug` on any Buildah command.
|
||||
|
||||
The registry is running and is waiting for requests to process. Notice that this registry is a Docker registry that we pulled from Docker hub and we are running it for this example using `buildah run`. There is no Docker daemon running at this time.
|
||||
|
||||
Let's push our image to the private registry. By default, Buildah is set up to expect secure connections to a registry. Therefore we will need to turn the TLS verification off using the `--tls-verify` flag. We also need to tell Buildah that the registry is on this local host ( i.e. localhost) and listening on port 5000. Similar to what you'd expect to do on multi-tenant Docker hub, we will explicitly specify that the registry is to store the image under the `ipbabble` repository - so as not to clash with other users' similarly named images.
|
||||
|
||||
# buildah push --tls-verify=false fedora-bashecho docker://localhost:5000/ipbabble/fedora-bashecho:latest
|
||||
|
||||
[Skopeo](https://github.com/projectatomic/skopeo) is a ProjectAtomic tool that was created to inspect images in registries without having to pull the image from the registry. It has grown to have many other uses. We will verify that the image has been stored by using Skopeo to inspect the image in the registry:
|
||||
|
||||
# skopeo inspect --tls-verify=false docker://localhost:5000/ipbabble/fedora-bashecho:latest
|
||||
{
|
||||
"Name": "localhost:5000/ipbabble/fedora-bashecho",
|
||||
"Digest": "sha256:6806f9385f97bc09f54b5c0ef583e58c3bc906c8c0b3e693d8782d0a0acf2137",
|
||||
"RepoTags": [
|
||||
"latest"
|
||||
],
|
||||
"Created": "2017-12-05T21:38:12.311901938Z",
|
||||
"DockerVersion": "",
|
||||
"Labels": {
|
||||
"name": "fedora-bashecho"
|
||||
},
|
||||
"Architecture": "amd64",
|
||||
"Os": "linux",
|
||||
"Layers": [
|
||||
"sha256:0cb7556c714767b8da6e0299cbeab765abaddede84769475c023785ae66d10ca"
|
||||
]
|
||||
}
|
||||
|
||||
We can verify that it is still portable with Docker by starting Docker again, as we did in the first tutorial. Then we can pull down the image and start the container using Docker:
|
||||
|
||||
# systemctl start docker
|
||||
# docker pull localhost:5000/ipbabble/fedora-bashecho
|
||||
Using default tag: latest
|
||||
Trying to pull repository localhost:5000/ipbabble/fedora-bashecho ...
|
||||
sha256:6806f9385f97bc09f54b5c0ef583e58c3bc906c8c0b3e693d8782d0a0acf2137: Pulling from localhost:5000/ipbabble/fedora-bashecho
|
||||
0cb7556c7147: Pull complete
|
||||
Digest: sha256:6806f9385f97bc09f54b5c0ef583e58c3bc906c8c0b3e693d8782d0a0acf2137
|
||||
Status: Downloaded newer image for localhost:5000/ipbabble/fedora-bashecho:latest
|
||||
|
||||
# docker run localhost:5000/ipbabble/fedora-bashecho
|
||||
This is a new container named ipbabble [ 0 ]
|
||||
This is a new container named ipbabble [ 1 ]
|
||||
This is a new container named ipbabble [ 2 ]
|
||||
This is a new container named ipbabble [ 3 ]
|
||||
This is a new container named ipbabble [ 4 ]
|
||||
This is a new container named ipbabble [ 5 ]
|
||||
This is a new container named ipbabble [ 6 ]
|
||||
This is a new container named ipbabble [ 7 ]
|
||||
This is a new container named ipbabble [ 8 ]
|
||||
This is a new container named ipbabble [ 9 ]
|
||||
# systemctl stop docker
|
||||
|
||||
Pushing to Docker hub is just as easy. Of course you must have an account with credentials. In this example I'm using a Docker hub API key, which has the form "username:password" (example password has been edited for privacy), that I created with my Docker hub account. I use the `--creds` flag to use my API key. I also specify my local image name `fedora-bashecho` as my image source and I use the `docker` protocol with no host or port so that it will look at the default Docker hub registry:
|
||||
|
||||
# buildah push --creds ipbabble:5bbb9990-6eeb-1234-af1a-aaa80066887c fedora-bashecho docker://ipbabble/fedora-bashecho:latest
|
||||
|
||||
And let's inspect that with Skopeo:
|
||||
|
||||
# skopeo inspect --creds ipbabble:5bbb9990-6eeb-1234-af1a-aaa80066887c docker://ipbabble/fedora-bashecho:latest
|
||||
{
|
||||
"Name": "docker.io/ipbabble/fedora-bashecho",
|
||||
"Digest": "sha256:6806f9385f97bc09f54b5c0ef583e58c3bc906c8c0b3e693d8782d0a0acf2137",
|
||||
"RepoTags": [
|
||||
"latest"
|
||||
],
|
||||
"Created": "2017-12-05T21:38:12.311901938Z",
|
||||
"DockerVersion": "",
|
||||
"Labels": {
|
||||
"name": "fedora-bashecho"
|
||||
},
|
||||
"Architecture": "amd64",
|
||||
"Os": "linux",
|
||||
"Layers": [
|
||||
"sha256:0cb7556c714767b8da6e0299cbeab765abaddede84769475c023785ae66d10ca"
|
||||
]
|
||||
}
|
||||
|
||||
We can use Buildah to pull down the image using the `buildah from` command. But before we do let's clean up our local containers-storage so that we don't have an existing fedora-bashecho - otherwise Buildah will know it already exists and not bother pulling it down.
|
||||
|
||||
# buildah images
|
||||
IMAGE ID IMAGE NAME CREATED AT SIZE
|
||||
d4cd7d73ee42 docker.io/library/registry:latest Dec 1, 2017 22:15 31.74 MB
|
||||
e31b0f0b0a63 docker.io/library/fedora-bashecho:latest Dec 5, 2017 21:38 772 B
|
||||
# buildah rmi fedora-bashecho
|
||||
untagged: docker.io/library/fedora-bashecho:latest
|
||||
e31b0f0b0a63e94c5a558d438d7490fab930a282a4736364360ab9b92cb25f3a
|
||||
# buildah images
|
||||
IMAGE ID IMAGE NAME CREATED AT SIZE
|
||||
d4cd7d73ee42 docker.io/library/registry:latest Dec 1, 2017 22:15 31.74 MB
|
||||
|
||||
Okay, so we don't have a fedora-bashecho anymore. Let's pull the image from Docker hub:
|
||||
|
||||
# buildah from ipbabble/fedora-bashecho
|
||||
|
||||
If you don't want to bother doing the remove image step (`rmi`) you can use the flag `--pull-always` to force the image to be pulled again and overwrite any corresponding local image.
|
||||
|
||||
Now check that the image is in the local containers-storage:
|
||||
|
||||
# buildah images
|
||||
IMAGE ID IMAGE NAME CREATED AT SIZE
|
||||
d4cd7d73ee42 docker.io/library/registry:latest Dec 1, 2017 22:15 31.74 MB
|
||||
864871ac1c45 docker.io/ipbabble/fedora-bashecho:latest Dec 5, 2017 21:38 315.4 MB
|
||||
|
||||
Success!
|
||||
|
||||
If you have any suggestions or issues please post them at the [ProjectAtomic Buildah Issues page](https://github.com/projectatomic/buildah/issues).
|
||||
|
||||
For more information on Buildah and how you might contribute please visit the [Buildah home page on Github](https://github.com/projectatomic/buildah).
|
||||
16
docs/tutorials/README.md
Normal file
@@ -0,0 +1,16 @@
|
||||

|
||||
|
||||
# Buildah Tutorials
|
||||
|
||||
## Links to a number of useful tutorials for the Buildah project.
|
||||
|
||||
**[Introduction Tutorial](https://github.com/projectatomic/buildah/tree/master/docs/tutorials/01-intro.md)**
|
||||
|
||||
Learn how to build container images compliant with the [Open Container Initiative](https://www.opencontainers.org/) (OCI) [image specification](https://github.com/opencontainers/image-spec) using Buildah.
|
||||
|
||||
|
||||
**[Buildah and Registries Tutorial](https://github.com/projectatomic/buildah/tree/master/docs/tutorials/02-registries-repositories.md)**
|
||||
|
||||
Learn how Buildah can be used to move OCI compliant images in and out of private or public registries.
|
||||
|
||||
|
||||
198
image.go
@@ -12,7 +12,6 @@ import (
|
||||
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/image"
|
||||
"github.com/containers/image/manifest"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
@@ -43,7 +42,6 @@ type containerImageRef struct {
|
||||
name reference.Named
|
||||
names []string
|
||||
layerID string
|
||||
addHistory bool
|
||||
oconfig []byte
|
||||
dconfig []byte
|
||||
created time.Time
|
||||
@@ -59,7 +57,6 @@ type containerImageSource struct {
|
||||
store storage.Store
|
||||
layerID string
|
||||
names []string
|
||||
addHistory bool
|
||||
compression archive.Compression
|
||||
config []byte
|
||||
configDigest digest.Digest
|
||||
@@ -68,12 +65,32 @@ type containerImageSource struct {
|
||||
exporting bool
|
||||
}
|
||||
|
||||
func (i *containerImageRef) NewImage(sc *types.SystemContext) (types.Image, error) {
|
||||
func (i *containerImageRef) NewImage(sc *types.SystemContext) (types.ImageCloser, error) {
|
||||
src, err := i.NewImageSource(sc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return image.FromSource(src)
|
||||
return image.FromSource(sc, src)
|
||||
}
|
||||
|
||||
func expectedOCIDiffIDs(image v1.Image) int {
|
||||
expected := 0
|
||||
for _, history := range image.History {
|
||||
if !history.EmptyLayer {
|
||||
expected = expected + 1
|
||||
}
|
||||
}
|
||||
return expected
|
||||
}
|
||||
|
||||
func expectedDockerDiffIDs(image docker.V2Image) int {
|
||||
expected := 0
|
||||
for _, history := range image.History {
|
||||
if !history.EmptyLayer {
|
||||
expected = expected + 1
|
||||
}
|
||||
}
|
||||
return expected
|
||||
}
|
||||
|
||||
func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.ImageSource, err error) {
|
||||
@@ -128,11 +145,14 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.I
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
created := i.created
|
||||
oimage.Created = &created
|
||||
dimage := docker.V2Image{}
|
||||
err = json.Unmarshal(i.dconfig, &dimage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dimage.Created = created
|
||||
|
||||
// Start building manifests.
|
||||
omanifest := v1.Manifest{
|
||||
@@ -164,9 +184,39 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.I
|
||||
|
||||
// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
|
||||
for _, layerID := range layers {
|
||||
// The default layer media type assumes no compression.
|
||||
omediaType := v1.MediaTypeImageLayer
|
||||
dmediaType := docker.V2S2MediaTypeUncompressedLayer
|
||||
// Figure out which media type we want to call this. Assume no compression.
|
||||
// If we're not re-exporting the data, reuse the blobsum and diff IDs.
|
||||
if !i.exporting && layerID != i.layerID {
|
||||
layer, err2 := i.store.Layer(layerID)
|
||||
if err2 != nil {
|
||||
return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
|
||||
}
|
||||
if layer.UncompressedDigest == "" {
|
||||
return nil, errors.Errorf("unable to look up size of layer %q", layerID)
|
||||
}
|
||||
layerBlobSum := layer.UncompressedDigest
|
||||
layerBlobSize := layer.UncompressedSize
|
||||
// Note this layer in the manifest, using the uncompressed blobsum.
|
||||
olayerDescriptor := v1.Descriptor{
|
||||
MediaType: omediaType,
|
||||
Digest: layerBlobSum,
|
||||
Size: layerBlobSize,
|
||||
}
|
||||
omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
|
||||
dlayerDescriptor := docker.V2S2Descriptor{
|
||||
MediaType: dmediaType,
|
||||
Digest: layerBlobSum,
|
||||
Size: layerBlobSize,
|
||||
}
|
||||
dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
|
||||
// Note this layer in the list of diffIDs, again using the uncompressed blobsum.
|
||||
oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, layerBlobSum)
|
||||
dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, layerBlobSum)
|
||||
continue
|
||||
}
|
||||
// Figure out if we need to change the media type, in case we're using compression.
|
||||
if i.compression != archive.Uncompressed {
|
||||
switch i.compression {
|
||||
case archive.Gzip:
|
||||
@@ -177,50 +227,26 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.I
|
||||
// Until the image specs define a media type for bzip2-compressed layers, even if we know
|
||||
// how to decompress them, we can't try to compress layers with bzip2.
|
||||
return nil, errors.New("media type for bzip2-compressed layers is not defined")
|
||||
case archive.Xz:
|
||||
// Until the image specs define a media type for xz-compressed layers, even if we know
|
||||
// how to decompress them, we can't try to compress layers with xz.
|
||||
return nil, errors.New("media type for xz-compressed layers is not defined")
|
||||
default:
|
||||
logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID)
|
||||
}
|
||||
}
|
||||
// If we're not re-exporting the data, just fake up layer and diff IDs for the manifest.
|
||||
if !i.exporting {
|
||||
fakeLayerDigest := digest.NewDigestFromHex(digest.Canonical.String(), layerID)
|
||||
// Add a note in the manifest about the layer. The blobs should be identified by their
|
||||
// possibly-compressed blob digests, but just use the layer IDs here.
|
||||
olayerDescriptor := v1.Descriptor{
|
||||
MediaType: omediaType,
|
||||
Digest: fakeLayerDigest,
|
||||
Size: -1,
|
||||
}
|
||||
omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
|
||||
dlayerDescriptor := docker.V2S2Descriptor{
|
||||
MediaType: dmediaType,
|
||||
Digest: fakeLayerDigest,
|
||||
Size: -1,
|
||||
}
|
||||
dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
|
||||
// Add a note about the diffID, which should be uncompressed digest of the blob, but
|
||||
// just use the layer ID here.
|
||||
oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, fakeLayerDigest)
|
||||
dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, fakeLayerDigest)
|
||||
continue
|
||||
}
|
||||
// Start reading the layer.
|
||||
rc, err := i.store.Diff("", layerID, nil)
|
||||
noCompression := archive.Uncompressed
|
||||
diffOptions := &storage.DiffOptions{
|
||||
Compression: &noCompression,
|
||||
}
|
||||
rc, err := i.store.Diff("", layerID, diffOptions)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error extracting layer %q", layerID)
|
||||
}
|
||||
defer rc.Close()
|
||||
// Set up to decompress the layer, in case it's coming out compressed. Due to implementation
|
||||
// differences, the result may not match the digest the blob had when it was originally imported,
|
||||
// so we have to recompute all of this anyway if we want to be sure the digests we use will be
|
||||
// correct.
|
||||
uncompressed, err := archive.DecompressStream(rc)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error decompressing layer %q", layerID)
|
||||
}
|
||||
defer uncompressed.Close()
|
||||
srcHasher := digest.Canonical.Digester()
|
||||
reader := io.TeeReader(uncompressed, srcHasher.Hash())
|
||||
reader := io.TeeReader(rc, srcHasher.Hash())
|
||||
// Set up to write the possibly-recompressed blob.
|
||||
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
@@ -229,7 +255,7 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.I
|
||||
destHasher := digest.Canonical.Digester()
|
||||
counter := ioutils.NewWriteCounter(layerFile)
|
||||
multiWriter := io.MultiWriter(counter, destHasher.Hash())
|
||||
// Compress the layer, if we're compressing it.
|
||||
// Compress the layer, if we're recompressing it.
|
||||
writer, err := archive.CompressStream(multiWriter, i.compression)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error compressing layer %q", layerID)
|
||||
@@ -267,27 +293,36 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.I
|
||||
Size: size,
|
||||
}
|
||||
dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
|
||||
// Add a note about the diffID, which is always an uncompressed value.
|
||||
// Add a note about the diffID, which is always the layer's uncompressed digest.
|
||||
oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest())
|
||||
dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest())
|
||||
}
|
||||
|
||||
if i.addHistory {
|
||||
// Build history notes in the image configurations.
|
||||
onews := v1.History{
|
||||
Created: &i.created,
|
||||
CreatedBy: i.createdBy,
|
||||
Author: oimage.Author,
|
||||
EmptyLayer: false,
|
||||
}
|
||||
oimage.History = append(oimage.History, onews)
|
||||
dnews := docker.V2S2History{
|
||||
Created: i.created,
|
||||
CreatedBy: i.createdBy,
|
||||
Author: dimage.Author,
|
||||
EmptyLayer: false,
|
||||
}
|
||||
dimage.History = append(dimage.History, dnews)
|
||||
// Build history notes in the image configurations.
|
||||
onews := v1.History{
|
||||
Created: &i.created,
|
||||
CreatedBy: i.createdBy,
|
||||
Author: oimage.Author,
|
||||
EmptyLayer: false,
|
||||
}
|
||||
oimage.History = append(oimage.History, onews)
|
||||
dnews := docker.V2S2History{
|
||||
Created: i.created,
|
||||
CreatedBy: i.createdBy,
|
||||
Author: dimage.Author,
|
||||
EmptyLayer: false,
|
||||
}
|
||||
dimage.History = append(dimage.History, dnews)
|
||||
|
||||
// Sanity check that we didn't just create a mismatch between non-empty layers in the
|
||||
// history and the number of diffIDs.
|
||||
expectedDiffIDs := expectedOCIDiffIDs(oimage)
|
||||
if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
|
||||
return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
|
||||
}
|
||||
expectedDiffIDs = expectedDockerDiffIDs(dimage)
|
||||
if len(dimage.RootFS.DiffIDs) != expectedDiffIDs {
|
||||
return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
|
||||
}
|
||||
|
||||
// Encode the image configuration blob.
|
||||
@@ -347,7 +382,6 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.I
|
||||
store: i.store,
|
||||
layerID: i.layerID,
|
||||
names: i.names,
|
||||
addHistory: i.addHistory,
|
||||
compression: i.compression,
|
||||
config: config,
|
||||
configDigest: digest.Canonical.FromBytes(config),
|
||||
@@ -402,16 +436,22 @@ func (i *containerImageSource) Reference() types.ImageReference {
|
||||
return i.ref
|
||||
}
|
||||
|
||||
func (i *containerImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
|
||||
func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
|
||||
if instanceDigest != nil && *instanceDigest != digest.FromBytes(i.manifest) {
|
||||
return nil, errors.Errorf("TODO")
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (i *containerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
|
||||
return []byte{}, "", errors.Errorf("TODO")
|
||||
func (i *containerImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) {
|
||||
if instanceDigest != nil && *instanceDigest != digest.FromBytes(i.manifest) {
|
||||
return nil, "", errors.Errorf("TODO")
|
||||
}
|
||||
return i.manifest, i.manifestType, nil
|
||||
}
|
||||
|
||||
func (i *containerImageSource) GetManifest() ([]byte, string, error) {
|
||||
return i.manifest, i.manifestType, nil
|
||||
func (i *containerImageSource) LayerInfosForCopy() []types.BlobInfo {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *containerImageSource) GetBlob(blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) {
|
||||
@@ -445,10 +485,14 @@ func (i *containerImageSource) GetBlob(blob types.BlobInfo) (reader io.ReadClose
|
||||
return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
|
||||
}
|
||||
|
||||
func (b *Builder) makeImageRef(manifestType string, exporting, addHistory bool, compress archive.Compression, names []string, layerID string, historyTimestamp *time.Time) (types.ImageReference, error) {
|
||||
func (b *Builder) makeImageRef(manifestType string, exporting bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
|
||||
var name reference.Named
|
||||
if len(names) > 0 {
|
||||
if parsed, err := reference.ParseNamed(names[0]); err == nil {
|
||||
container, err := b.store.Container(b.ContainerID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error locating container %q", b.ContainerID)
|
||||
}
|
||||
if len(container.Names) > 0 {
|
||||
if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil {
|
||||
name = parsed
|
||||
}
|
||||
}
|
||||
@@ -471,9 +515,8 @@ func (b *Builder) makeImageRef(manifestType string, exporting, addHistory bool,
|
||||
store: b.store,
|
||||
compression: compress,
|
||||
name: name,
|
||||
names: names,
|
||||
layerID: layerID,
|
||||
addHistory: addHistory,
|
||||
names: container.Names,
|
||||
layerID: container.LayerID,
|
||||
oconfig: oconfig,
|
||||
dconfig: dconfig,
|
||||
created: created,
|
||||
@@ -484,18 +527,3 @@ func (b *Builder) makeImageRef(manifestType string, exporting, addHistory bool,
|
||||
}
|
||||
return ref, nil
|
||||
}
|
||||
|
||||
func (b *Builder) makeContainerImageRef(manifestType string, exporting bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
|
||||
if manifestType == "" {
|
||||
manifestType = OCIv1ImageManifest
|
||||
}
|
||||
container, err := b.store.Container(b.ContainerID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error locating container %q", b.ContainerID)
|
||||
}
|
||||
return b.makeImageRef(manifestType, exporting, true, compress, container.Names, container.LayerID, historyTimestamp)
|
||||
}
|
||||
|
||||
func (b *Builder) makeImageImageRef(compress archive.Compression, names []string, layerID string, historyTimestamp *time.Time) (types.ImageReference, error) {
|
||||
return b.makeImageRef(manifest.GuessMIMEType(b.Manifest), true, false, compress, names, layerID, historyTimestamp)
|
||||
}
|
||||
|
||||
@@ -95,8 +95,6 @@ type BuildOptions struct {
|
||||
// specified, indicating that the shared, system-wide default policy
|
||||
// should be used.
|
||||
SignaturePolicyPath string
|
||||
// SkipTLSVerify denotes whether TLS verification should not be used.
|
||||
SkipTLSVerify bool
|
||||
// ReportWriter is an io.Writer which will be used to report the
|
||||
// progress of the (possible) pulling of the source image and the
|
||||
// writing of the new image.
|
||||
@@ -105,7 +103,8 @@ type BuildOptions struct {
|
||||
// configuration data.
|
||||
// Accepted values are OCIv1ImageFormat and Dockerv2ImageFormat.
|
||||
OutputFormat string
|
||||
AuthFilePath string
|
||||
// SystemContext holds parameters used for authentication.
|
||||
SystemContext *types.SystemContext
|
||||
}
|
||||
|
||||
// Executor is a buildah-based implementation of the imagebuilder.Executor
|
||||
@@ -139,18 +138,6 @@ type Executor struct {
|
||||
reportWriter io.Writer
|
||||
}
|
||||
|
||||
func makeSystemContext(signaturePolicyPath, authFilePath string, skipTLSVerify bool) *types.SystemContext {
|
||||
sc := &types.SystemContext{}
|
||||
if signaturePolicyPath != "" {
|
||||
sc.SignaturePolicyPath = signaturePolicyPath
|
||||
}
|
||||
if authFilePath != "" {
|
||||
sc.AuthFilePath = authFilePath
|
||||
}
|
||||
sc.DockerInsecureSkipTLSVerify = skipTLSVerify
|
||||
return sc
|
||||
}
|
||||
|
||||
// Preserve informs the executor that from this point on, it needs to ensure
|
||||
// that only COPY and ADD instructions can modify the contents of this
|
||||
// directory or anything below it.
|
||||
@@ -341,7 +328,7 @@ func (b *Executor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
|
||||
sources = append(sources, filepath.Join(b.contextDir, src))
|
||||
}
|
||||
}
|
||||
if err := b.builder.Add(copy.Dest, copy.Download, sources...); err != nil {
|
||||
if err := b.builder.Add(copy.Dest, copy.Download, buildah.AddAndCopyOptions{}, sources...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -380,6 +367,7 @@ func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error {
|
||||
Entrypoint: config.Entrypoint,
|
||||
Cmd: config.Cmd,
|
||||
NetworkDisabled: config.NetworkDisabled,
|
||||
Quiet: b.quiet,
|
||||
}
|
||||
|
||||
args := run.Args
|
||||
@@ -401,12 +389,23 @@ func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error {
|
||||
// UnrecognizedInstruction is called when we encounter an instruction that the
|
||||
// imagebuilder parser didn't understand.
|
||||
func (b *Executor) UnrecognizedInstruction(step *imagebuilder.Step) error {
|
||||
if !b.ignoreUnrecognizedInstructions {
|
||||
logrus.Debugf("+(UNIMPLEMENTED?) %#v", step)
|
||||
err_str := fmt.Sprintf("Build error: Unknown instruction: %q ", step.Command)
|
||||
err := fmt.Sprintf(err_str+"%#v", step)
|
||||
if b.ignoreUnrecognizedInstructions {
|
||||
logrus.Debugf(err)
|
||||
return nil
|
||||
}
|
||||
logrus.Errorf("+(UNIMPLEMENTED?) %#v", step)
|
||||
return errors.Errorf("Unrecognized instruction: %#v", step)
|
||||
|
||||
switch logrus.GetLevel() {
|
||||
case logrus.ErrorLevel:
|
||||
logrus.Errorf(err_str)
|
||||
case logrus.DebugLevel:
|
||||
logrus.Debugf(err)
|
||||
default:
|
||||
logrus.Errorf("+(UNHANDLED LOGLEVEL) %#v", step)
|
||||
}
|
||||
|
||||
return errors.Errorf(err)
|
||||
}
|
||||
|
||||
// NewExecutor creates a new instance of the imagebuilder.Executor interface.
|
||||
@@ -427,7 +426,7 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
|
||||
outputFormat: options.OutputFormat,
|
||||
additionalTags: options.AdditionalTags,
|
||||
signaturePolicyPath: options.SignaturePolicyPath,
|
||||
systemContext: makeSystemContext(options.SignaturePolicyPath, options.AuthFilePath, options.SkipTLSVerify),
|
||||
systemContext: options.SystemContext,
|
||||
volumeCache: make(map[string]string),
|
||||
volumeCacheInfo: make(map[string]os.FileInfo),
|
||||
log: options.Log,
|
||||
@@ -475,6 +474,7 @@ func (b *Executor) Prepare(ib *imagebuilder.Builder, node *parser.Node, from str
|
||||
Transport: b.transport,
|
||||
SignaturePolicyPath: b.signaturePolicyPath,
|
||||
ReportWriter: b.reportWriter,
|
||||
SystemContext: b.systemContext,
|
||||
}
|
||||
builder, err := buildah.NewBuilder(b.store, builderOptions)
|
||||
if err != nil {
|
||||
@@ -578,6 +578,8 @@ func (b *Executor) Commit(ib *imagebuilder.Builder) (err error) {
|
||||
if err2 == nil {
|
||||
imageRef = imageRef2
|
||||
err = nil
|
||||
} else {
|
||||
err = err2
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -586,6 +588,9 @@ func (b *Executor) Commit(ib *imagebuilder.Builder) (err error) {
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing reference for image to be written")
|
||||
}
|
||||
if ib.Author != "" {
|
||||
b.builder.SetMaintainer(ib.Author)
|
||||
}
|
||||
config := ib.Config()
|
||||
b.builder.SetHostname(config.Hostname)
|
||||
b.builder.SetDomainname(config.Domainname)
|
||||
|
||||
36
import.go
@@ -16,9 +16,9 @@ func importBuilderDataFromImage(store storage.Store, systemContext *types.System
|
||||
imageName := ""
|
||||
|
||||
if imageID != "" {
|
||||
ref, err := is.Transport.ParseStoreReference(store, "@"+imageID)
|
||||
ref, err := is.Transport.ParseStoreReference(store, imageID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "no such image %q", "@"+imageID)
|
||||
return nil, errors.Wrapf(err, "no such image %q", imageID)
|
||||
}
|
||||
src, err2 := ref.NewImage(systemContext)
|
||||
if err2 != nil {
|
||||
@@ -68,7 +68,7 @@ func importBuilder(store storage.Store, options ImportOptions) (*Builder, error)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
systemContext := getSystemContext(options.SignaturePolicyPath)
|
||||
systemContext := getSystemContext(&types.SystemContext{}, options.SignaturePolicyPath)
|
||||
|
||||
builder, err := importBuilderDataFromImage(store, systemContext, c.ImageID, options.Container, c.ID)
|
||||
if err != nil {
|
||||
@@ -95,21 +95,27 @@ func importBuilder(store storage.Store, options ImportOptions) (*Builder, error)
|
||||
}
|
||||
|
||||
func importBuilderFromImage(store storage.Store, options ImportFromImageOptions) (*Builder, error) {
|
||||
var img *storage.Image
|
||||
var err error
|
||||
|
||||
if options.Image == "" {
|
||||
return nil, errors.Errorf("image name must be specified")
|
||||
}
|
||||
|
||||
img, err := util.FindImage(store, options.Image)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error locating image %q for importing settings", options.Image)
|
||||
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
|
||||
|
||||
for _, image := range util.ResolveName(options.Image, "", systemContext, store) {
|
||||
img, err = util.FindImage(store, image)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
builder, err2 := importBuilderDataFromImage(store, systemContext, img.ID, "", "")
|
||||
if err2 != nil {
|
||||
return nil, errors.Wrapf(err2, "error importing build settings from image %q", options.Image)
|
||||
}
|
||||
|
||||
return builder, nil
|
||||
}
|
||||
|
||||
systemContext := getSystemContext(options.SignaturePolicyPath)
|
||||
|
||||
builder, err := importBuilderDataFromImage(store, systemContext, img.ID, "", "")
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error importing build settings from image %q", options.Image)
|
||||
}
|
||||
|
||||
return builder, nil
|
||||
return nil, errors.Wrapf(err, "error locating image %q for importing settings", options.Image)
|
||||
}
|
||||
|
||||
142
install.md
Normal file
@@ -0,0 +1,142 @@
|
||||
# Installation Instructions
|
||||
|
||||
## System Requirements
|
||||
|
||||
### Kernel Version Requirements
|
||||
To run Buildah on Red Hat Enterprise Linux or CentOS, version 7.4 or higher is required.
|
||||
On other Linux distributions Buildah requires a kernel version of 4.0 or
|
||||
higher in order to support the OverlayFS filesystem. The kernel version can be checked
|
||||
with the 'uname -a' command.
|
||||
|
||||
### runc Requirement
|
||||
|
||||
Buildah uses `runc` to run commands when `buildah run` is used, or when `buildah build-using-dockerfile`
|
||||
encounters a `RUN` instruction, so you'll also need to build and install a compatible version of
|
||||
[runc](https://github.com/opencontainers/runc) for Buildah to call for those cases. If Buildah is installed
|
||||
via a package manager such as yum, dnf or apt-get, runc will be installed as part of that process.
|
||||
|
||||
## Package Installation
|
||||
|
||||
Buildah is available on several software repositories and can be installed via a package manager such
|
||||
as yum, dnf or apt-get on a number of Linux distributions.
|
||||
|
||||
## Installation from GitHub
|
||||
|
||||
Prior to installing Buildah, install the following packages on your Linux distro:
|
||||
* make
|
||||
* golang (Requires version 1.8.1 or higher.)
|
||||
* bats
|
||||
* btrfs-progs-devel
|
||||
* bzip2
|
||||
* device-mapper-devel
|
||||
* git
|
||||
* go-md2man
|
||||
* gpgme-devel
|
||||
* glib2-devel
|
||||
* libassuan-devel
|
||||
* ostree-devel
|
||||
* runc (Requires version 1.0 RC4 or higher.)
|
||||
* skopeo-containers
|
||||
|
||||
### Fedora
|
||||
|
||||
In Fedora, you can use this command:
|
||||
|
||||
```
|
||||
dnf -y install \
|
||||
make \
|
||||
golang \
|
||||
bats \
|
||||
btrfs-progs-devel \
|
||||
device-mapper-devel \
|
||||
glib2-devel \
|
||||
gpgme-devel \
|
||||
libassuan-devel \
|
||||
ostree-devel \
|
||||
git \
|
||||
bzip2 \
|
||||
go-md2man \
|
||||
runc \
|
||||
skopeo-containers
|
||||
```
|
||||
|
||||
Then to install Buildah on Fedora follow the steps in this example:
|
||||
|
||||
|
||||
```
|
||||
mkdir ~/buildah
|
||||
cd ~/buildah
|
||||
export GOPATH=`pwd`
|
||||
git clone https://github.com/projectatomic/buildah ./src/github.com/projectatomic/buildah
|
||||
cd ./src/github.com/projectatomic/buildah
|
||||
make
|
||||
sudo make install
|
||||
buildah --help
|
||||
```
|
||||
|
||||
### RHEL, CentOS
|
||||
|
||||
In RHEL and CentOS 7, ensure that you are subscribed to `rhel-7-server-rpms`,
|
||||
`rhel-7-server-extras-rpms`, and `rhel-7-server-optional-rpms`, then
|
||||
run this command:
|
||||
|
||||
```
|
||||
yum -y install \
|
||||
make \
|
||||
golang \
|
||||
bats \
|
||||
btrfs-progs-devel \
|
||||
device-mapper-devel \
|
||||
glib2-devel \
|
||||
gpgme-devel \
|
||||
libassuan-devel \
|
||||
ostree-devel \
|
||||
git \
|
||||
bzip2 \
|
||||
go-md2man \
|
||||
runc \
|
||||
skopeo-containers
|
||||
```
|
||||
|
||||
The build steps for Buildah on RHEL or CentOS are the same as Fedora, above.
|
||||
|
||||
### Ubuntu
|
||||
|
||||
In Ubuntu zesty and xenial, you can use these commands:
|
||||
|
||||
```
|
||||
apt-get -y install software-properties-common
|
||||
add-apt-repository -y ppa:alexlarsson/flatpak
|
||||
add-apt-repository -y ppa:gophers/archive
|
||||
apt-add-repository -y ppa:projectatomic/ppa
|
||||
apt-get -y -qq update
|
||||
apt-get -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libostree-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
|
||||
apt-get -y install golang-1.8
|
||||
```
|
||||
Then to install Buildah on Ubuntu follow the steps in this example:
|
||||
|
||||
```
|
||||
mkdir ~/buildah
|
||||
cd ~/buildah
|
||||
export GOPATH=`pwd`
|
||||
git clone https://github.com/projectatomic/buildah ./src/github.com/projectatomic/buildah
|
||||
cd ./src/github.com/projectatomic/buildah
|
||||
PATH=/usr/lib/go-1.8/bin:$PATH make runc all TAGS="apparmor seccomp"
|
||||
sudo make install install.runc
|
||||
buildah --help
|
||||
```
|
||||
|
||||
### Debian
|
||||
|
||||
To install the required dependencies, you can use those commands, tested under Debian GNU/Linux amd64 9.3 (stretch):
|
||||
|
||||
```
|
||||
gpg --recv-keys 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D
|
||||
gpg --export 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D >> /usr/share/keyrings/projectatomic-ppa.gpg
|
||||
echo 'deb [signed-by=/usr/share/keyrings/projectatomic-ppa.gpg] http://ppa.launchpad.net/projectatomic/ppa/ubuntu zesty main' > /etc/apt/sources.list.d/projectatomic-ppa.list
|
||||
apt update
|
||||
apt -y install -t stretch-backports libostree-dev golang
|
||||
apt -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
|
||||
```
|
||||
|
||||
The build steps on Debian are otherwise the same as Ubuntu, above.
|
||||
|
Before Width: | Height: | Size: 31 KiB |
|
Before Width: | Height: | Size: 31 KiB |
|
Before Width: | Height: | Size: 13 KiB |
|
Before Width: | Height: | Size: 7.8 KiB |
2888
logos/buildah-logo-source.svg
Normal file
|
After Width: | Height: | Size: 170 KiB |
|
Before Width: | Height: | Size: 27 KiB |
BIN
logos/buildah-logo_large.png
Normal file
|
After Width: | Height: | Size: 33 KiB |
BIN
logos/buildah-logo_large_transparent-bg.png
Normal file
|
After Width: | Height: | Size: 30 KiB |
|
Before Width: | Height: | Size: 12 KiB |
BIN
logos/buildah-logo_medium.png
Normal file
|
After Width: | Height: | Size: 13 KiB |
BIN
logos/buildah-logo_medium_transparent-bg.png
Normal file
|
After Width: | Height: | Size: 12 KiB |
BIN
logos/buildah-logo_reverse_large.png
Normal file
|
After Width: | Height: | Size: 32 KiB |
BIN
logos/buildah-logo_reverse_medium.png
Normal file
|
After Width: | Height: | Size: 13 KiB |
BIN
logos/buildah-logo_reverse_small.png
Normal file
|
After Width: | Height: | Size: 7.8 KiB |
|
Before Width: | Height: | Size: 7.1 KiB |
BIN
logos/buildah-logo_small.png
Normal file
|
After Width: | Height: | Size: 7.7 KiB |
BIN
logos/buildah-logo_small_transparent-bg.png
Normal file
|
After Width: | Height: | Size: 7.0 KiB |
BIN
logos/buildah-logomark_large.png
Normal file
|
After Width: | Height: | Size: 21 KiB |
BIN
logos/buildah-logomark_large_transparent-bg.png
Normal file
|
After Width: | Height: | Size: 19 KiB |
BIN
logos/buildah-logomark_medium.png
Normal file
|
After Width: | Height: | Size: 11 KiB |
BIN
logos/buildah-logomark_medium_transparent-bg.png
Normal file
|
After Width: | Height: | Size: 9.8 KiB |
BIN
logos/buildah-logomark_small.png
Normal file
|
After Width: | Height: | Size: 4.6 KiB |
BIN
logos/buildah-logomark_small_transparent-bg.png
Normal file
|
After Width: | Height: | Size: 4.1 KiB |
|
Before Width: | Height: | Size: 29 KiB |
1870
logos/buildah.svg
|
Before Width: | Height: | Size: 88 KiB |
213
new.go
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/openshift/imagebuilder"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectatomic/buildah/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -26,6 +27,10 @@ const (
|
||||
// can't find one in the local Store, in order to generate a source
|
||||
// reference for the image that we can then copy to the local Store.
|
||||
DefaultTransport = "docker://"
|
||||
|
||||
// minimumTruncatedIDLength is the minimum length of an identifier that
|
||||
// we'll accept as possibly being a truncated image ID.
|
||||
minimumTruncatedIDLength = 3
|
||||
)
|
||||
|
||||
func reserveSELinuxLabels(store storage.Store, id string) error {
|
||||
@@ -58,91 +63,173 @@ func reserveSELinuxLabels(store storage.Store, id string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func pullAndFindImage(store storage.Store, imageName string, options BuilderOptions, sc *types.SystemContext) (*storage.Image, types.ImageReference, error) {
|
||||
ref, err := pullImage(store, imageName, options, sc)
|
||||
if err != nil {
|
||||
logrus.Debugf("error pulling image %q: %v", imageName, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
img, err := is.Transport.GetStoreImage(store, ref)
|
||||
if err != nil {
|
||||
logrus.Debugf("error reading pulled image %q: %v", imageName, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
return img, ref, nil
|
||||
}
|
||||
|
||||
func getImageName(name string, img *storage.Image) string {
|
||||
imageName := name
|
||||
if len(img.Names) > 0 {
|
||||
imageName = img.Names[0]
|
||||
// When the image used by the container is a tagged image
|
||||
// the container name might be set to the original image instead of
|
||||
// the image given in the "form" command line.
|
||||
// This loop is supposed to fix this.
|
||||
for _, n := range img.Names {
|
||||
if strings.Contains(n, name) {
|
||||
imageName = n
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return imageName
|
||||
}
|
||||
|
||||
func imageNamePrefix(imageName string) string {
|
||||
prefix := imageName
|
||||
s := strings.Split(imageName, "/")
|
||||
if len(s) > 0 {
|
||||
prefix = s[len(s)-1]
|
||||
}
|
||||
s = strings.Split(prefix, ":")
|
||||
if len(s) > 0 {
|
||||
prefix = s[0]
|
||||
}
|
||||
s = strings.Split(prefix, "@")
|
||||
if len(s) > 0 {
|
||||
prefix = s[0]
|
||||
}
|
||||
return prefix
|
||||
}
|
||||
|
||||
func imageManifestAndConfig(ref types.ImageReference, systemContext *types.SystemContext) (manifest, config []byte, err error) {
|
||||
if ref != nil {
|
||||
src, err := ref.NewImage(systemContext)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error instantiating image for %q", transports.ImageName(ref))
|
||||
}
|
||||
defer src.Close()
|
||||
config, err := src.ConfigBlob()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error reading image configuration for %q", transports.ImageName(ref))
|
||||
}
|
||||
manifest, _, err := src.Manifest()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error reading image manifest for %q", transports.ImageName(ref))
|
||||
}
|
||||
return manifest, config, nil
|
||||
}
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func newBuilder(store storage.Store, options BuilderOptions) (*Builder, error) {
|
||||
var ref types.ImageReference
|
||||
var img *storage.Image
|
||||
manifest := []byte{}
|
||||
config := []byte{}
|
||||
var err error
|
||||
var manifest []byte
|
||||
var config []byte
|
||||
|
||||
if options.FromImage == BaseImageFakeName {
|
||||
options.FromImage = ""
|
||||
}
|
||||
image := options.FromImage
|
||||
|
||||
if options.Transport == "" {
|
||||
options.Transport = DefaultTransport
|
||||
}
|
||||
|
||||
systemContext := getSystemContext(options.SignaturePolicyPath)
|
||||
systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
|
||||
|
||||
for _, image := range util.ResolveName(options.FromImage, options.Registry, systemContext, store) {
|
||||
if len(image) >= minimumTruncatedIDLength {
|
||||
if img, err = store.Image(image); err == nil && img != nil && strings.HasPrefix(img.ID, image) {
|
||||
if ref, err = is.Transport.ParseStoreReference(store, img.ID); err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
imageID := ""
|
||||
if image != "" {
|
||||
var err error
|
||||
if options.PullPolicy == PullAlways {
|
||||
pulledReference, err2 := pullImage(store, options, systemContext)
|
||||
pulledImg, pulledReference, err2 := pullAndFindImage(store, image, options, systemContext)
|
||||
if err2 != nil {
|
||||
return nil, errors.Wrapf(err2, "error pulling image %q", image)
|
||||
logrus.Debugf("error pulling and reading image %q: %v", image, err2)
|
||||
err = err2
|
||||
continue
|
||||
}
|
||||
ref = pulledReference
|
||||
img = pulledImg
|
||||
break
|
||||
}
|
||||
if ref == nil {
|
||||
srcRef, err2 := alltransports.ParseImageName(image)
|
||||
if err2 != nil {
|
||||
srcRef2, err3 := alltransports.ParseImageName(options.Registry + image)
|
||||
if err3 != nil {
|
||||
srcRef3, err4 := alltransports.ParseImageName(options.Transport + options.Registry + image)
|
||||
if err4 != nil {
|
||||
return nil, errors.Wrapf(err4, "error parsing image name %q", options.Transport+options.Registry+image)
|
||||
}
|
||||
srcRef2 = srcRef3
|
||||
}
|
||||
srcRef = srcRef2
|
||||
}
|
||||
|
||||
destImage, err2 := localImageNameForReference(store, srcRef)
|
||||
if err2 != nil {
|
||||
return nil, errors.Wrapf(err2, "error computing local image name for %q", transports.ImageName(srcRef))
|
||||
srcRef, err2 := alltransports.ParseImageName(image)
|
||||
if err2 != nil {
|
||||
if options.Transport == "" {
|
||||
logrus.Debugf("error parsing image name %q: %v", image, err2)
|
||||
err = err2
|
||||
continue
|
||||
}
|
||||
if destImage == "" {
|
||||
return nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef))
|
||||
srcRef2, err3 := alltransports.ParseImageName(options.Transport + image)
|
||||
if err3 != nil {
|
||||
logrus.Debugf("error parsing image name %q: %v", image, err2)
|
||||
err = err3
|
||||
continue
|
||||
}
|
||||
srcRef = srcRef2
|
||||
}
|
||||
|
||||
ref, err = is.Transport.ParseStoreReference(store, destImage)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing reference to image %q", destImage)
|
||||
}
|
||||
destImage, err2 := localImageNameForReference(store, srcRef)
|
||||
if err2 != nil {
|
||||
return nil, errors.Wrapf(err2, "error computing local image name for %q", transports.ImageName(srcRef))
|
||||
}
|
||||
if destImage == "" {
|
||||
return nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef))
|
||||
}
|
||||
|
||||
image = destImage
|
||||
ref, err = is.Transport.ParseStoreReference(store, destImage)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing reference to image %q", destImage)
|
||||
}
|
||||
img, err = is.Transport.GetStoreImage(store, ref)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == storage.ErrImageUnknown && options.PullPolicy != PullIfMissing {
|
||||
return nil, errors.Wrapf(err, "no such image %q", transports.ImageName(ref))
|
||||
logrus.Debugf("no such image %q: %v", transports.ImageName(ref), err)
|
||||
continue
|
||||
}
|
||||
ref2, err2 := pullImage(store, options, systemContext)
|
||||
pulledImg, pulledReference, err2 := pullAndFindImage(store, image, options, systemContext)
|
||||
if err2 != nil {
|
||||
return nil, errors.Wrapf(err2, "error pulling image %q", image)
|
||||
logrus.Debugf("error pulling and reading image %q: %v", image, err2)
|
||||
err = err2
|
||||
continue
|
||||
}
|
||||
ref = ref2
|
||||
img, err = is.Transport.GetStoreImage(store, ref)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "no such image %q", transports.ImageName(ref))
|
||||
ref = pulledReference
|
||||
img = pulledImg
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if options.FromImage != "" && (ref == nil || img == nil) {
|
||||
// If options.FromImage is set but we ended up
|
||||
// with nil in ref or in img then there was an error that
|
||||
// we should return.
|
||||
return nil, util.GetFailureCause(err, errors.Wrapf(storage.ErrImageUnknown, "no such image %q", options.FromImage))
|
||||
}
|
||||
image := options.FromImage
|
||||
imageID := ""
|
||||
if img != nil {
|
||||
image = getImageName(imageNamePrefix(image), img)
|
||||
imageID = img.ID
|
||||
src, err := ref.NewImage(systemContext)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error instantiating image for %q", transports.ImageName(ref))
|
||||
}
|
||||
defer src.Close()
|
||||
config, err = src.ConfigBlob()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading image configuration for %q", transports.ImageName(ref))
|
||||
}
|
||||
manifest, _, err = src.Manifest()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading image manifest for %q", transports.ImageName(ref))
|
||||
}
|
||||
}
|
||||
if manifest, config, err = imageManifestAndConfig(ref, systemContext); err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading data from image %q", transports.ImageName(ref))
|
||||
}
|
||||
|
||||
name := "working-container"
|
||||
@@ -151,20 +238,7 @@ func newBuilder(store storage.Store, options BuilderOptions) (*Builder, error) {
|
||||
} else {
|
||||
var err2 error
|
||||
if image != "" {
|
||||
prefix := image
|
||||
s := strings.Split(prefix, "/")
|
||||
if len(s) > 0 {
|
||||
prefix = s[len(s)-1]
|
||||
}
|
||||
s = strings.Split(prefix, ":")
|
||||
if len(s) > 0 {
|
||||
prefix = s[0]
|
||||
}
|
||||
s = strings.Split(prefix, "@")
|
||||
if len(s) > 0 {
|
||||
prefix = s[0]
|
||||
}
|
||||
name = prefix + "-" + name
|
||||
name = imageNamePrefix(image) + "-" + name
|
||||
}
|
||||
suffix := 1
|
||||
tmpName := name
|
||||
@@ -177,6 +251,7 @@ func newBuilder(store storage.Store, options BuilderOptions) (*Builder, error) {
|
||||
}
|
||||
name = tmpName
|
||||
}
|
||||
|
||||
coptions := storage.ContainerOptions{}
|
||||
container, err := store.CreateContainer("", []string{name}, imageID, "", "", &coptions)
|
||||
if err != nil {
|
||||
@@ -191,7 +266,7 @@ func newBuilder(store storage.Store, options BuilderOptions) (*Builder, error) {
|
||||
}
|
||||
}()
|
||||
|
||||
if err := reserveSELinuxLabels(store, container.ID); err != nil {
|
||||
if err = reserveSELinuxLabels(store, container.ID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
processLabel, mountLabel, err := label.InitLabels(nil)
|
||||
|
||||
28
new_test.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package buildah
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/containers/storage"
|
||||
)
|
||||
|
||||
func TestGetImageName(t *testing.T) {
|
||||
tt := []struct {
|
||||
caseName string
|
||||
name string
|
||||
names []string
|
||||
expected string
|
||||
}{
|
||||
{"tagged image", "busybox1", []string{"docker.io/library/busybox:latest", "docker.io/library/busybox1:latest"}, "docker.io/library/busybox1:latest"},
|
||||
{"image name not in the resolved image names", "image1", []string{"docker.io/library/busybox:latest", "docker.io/library/busybox1:latest"}, "docker.io/library/busybox:latest"},
|
||||
{"resolved image with empty name list", "image1", []string{}, "image1"},
|
||||
}
|
||||
|
||||
for _, tc := range tt {
|
||||
img := &storage.Image{Names: tc.names}
|
||||
res := getImageName(tc.name, img)
|
||||
if res != tc.expected {
|
||||
t.Errorf("test case '%s' failed: expected %#v but got %#v", tc.caseName, tc.expected, res)
|
||||
}
|
||||
}
|
||||
}
|
||||
41
pull.go
@@ -53,27 +53,17 @@ func localImageNameForReference(store storage.Store, srcRef types.ImageReference
|
||||
return name, nil
|
||||
}
|
||||
|
||||
func pullImage(store storage.Store, options BuilderOptions, sc *types.SystemContext) (types.ImageReference, error) {
|
||||
name := options.FromImage
|
||||
|
||||
spec := name
|
||||
if options.Registry != "" {
|
||||
spec = options.Registry + spec
|
||||
}
|
||||
spec2 := spec
|
||||
if options.Transport != "" {
|
||||
spec2 = options.Transport + spec
|
||||
}
|
||||
|
||||
srcRef, err := alltransports.ParseImageName(name)
|
||||
func pullImage(store storage.Store, imageName string, options BuilderOptions, sc *types.SystemContext) (types.ImageReference, error) {
|
||||
spec := imageName
|
||||
srcRef, err := alltransports.ParseImageName(spec)
|
||||
if err != nil {
|
||||
if options.Transport == "" {
|
||||
return nil, errors.Wrapf(err, "error parsing image name %q", spec)
|
||||
}
|
||||
spec = options.Transport + spec
|
||||
srcRef2, err2 := alltransports.ParseImageName(spec)
|
||||
if err2 != nil {
|
||||
srcRef3, err3 := alltransports.ParseImageName(spec2)
|
||||
if err3 != nil {
|
||||
return nil, errors.Wrapf(err3, "error parsing image name %q", spec2)
|
||||
}
|
||||
srcRef2 = srcRef3
|
||||
return nil, errors.Wrapf(err2, "error parsing image name %q", spec)
|
||||
}
|
||||
srcRef = srcRef2
|
||||
}
|
||||
@@ -91,6 +81,12 @@ func pullImage(store storage.Store, options BuilderOptions, sc *types.SystemCont
|
||||
return nil, errors.Wrapf(err, "error parsing image name %q", destName)
|
||||
}
|
||||
|
||||
img, err := srcRef.NewImageSource(sc)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error initializing %q as an image source", spec)
|
||||
}
|
||||
img.Close()
|
||||
|
||||
policy, err := signature.DefaultPolicy(sc)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error obtaining default signature policy")
|
||||
@@ -103,12 +99,15 @@ func pullImage(store storage.Store, options BuilderOptions, sc *types.SystemCont
|
||||
|
||||
defer func() {
|
||||
if err2 := policyContext.Destroy(); err2 != nil {
|
||||
logrus.Debugf("error destroying signature polcy context: %v", err2)
|
||||
logrus.Debugf("error destroying signature policy context: %v", err2)
|
||||
}
|
||||
}()
|
||||
|
||||
logrus.Debugf("copying %q to %q", spec, name)
|
||||
logrus.Debugf("copying %q to %q", spec, destName)
|
||||
|
||||
err = cp.Image(policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter, options.SystemContext, nil, ""))
|
||||
return destRef, err
|
||||
if err == nil {
|
||||
return destRef, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
18
run.go
@@ -12,6 +12,7 @@ import (
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/runtime-tools/generate"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
@@ -64,6 +65,8 @@ type RunOptions struct {
|
||||
// decision can be overridden by specifying either WithTerminal or
|
||||
// WithoutTerminal.
|
||||
Terminal int
|
||||
// Quiet tells the run to turn off output to stdout.
|
||||
Quiet bool
|
||||
}
|
||||
|
||||
func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts []specs.Mount, bindFiles, volumes []string) error {
|
||||
@@ -112,6 +115,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts
|
||||
secretMounts, err := secretMounts(file, b.MountLabel, cdir)
|
||||
if err != nil {
|
||||
logrus.Warn("error mounting secrets, skipping...")
|
||||
continue
|
||||
}
|
||||
for _, mount := range secretMounts {
|
||||
if haveMount(mount.Destination) {
|
||||
@@ -134,8 +138,11 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts
|
||||
if err = os.MkdirAll(volumePath, 0755); err != nil {
|
||||
return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, volume, b.ContainerID)
|
||||
}
|
||||
if err = label.Relabel(volumePath, b.MountLabel, false); err != nil {
|
||||
return errors.Wrapf(err, "error relabeling directory %q for volume %q in container %q", volumePath, volume, b.ContainerID)
|
||||
}
|
||||
srcPath := filepath.Join(mountPoint, volume)
|
||||
if err = copyFileWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
|
||||
if err = copyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
|
||||
return errors.Wrapf(err, "error populating directory %q for volume %q in container %q using contents of %q", volumePath, volume, b.ContainerID, srcPath)
|
||||
}
|
||||
|
||||
@@ -244,11 +251,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
|
||||
return errors.Wrapf(err, "error removing network namespace for run")
|
||||
}
|
||||
}
|
||||
if options.User != "" {
|
||||
user, err = getUser(mountPoint, options.User)
|
||||
} else {
|
||||
user, err = getUser(mountPoint, b.User())
|
||||
}
|
||||
user, err = b.user(mountPoint, options.User)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -285,6 +288,9 @@ func (b *Builder) Run(command []string, options RunOptions) error {
|
||||
cmd.Dir = mountPoint
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
if options.Quiet {
|
||||
cmd.Stdout = nil
|
||||
}
|
||||
cmd.Stderr = os.Stderr
|
||||
err = cmd.Run()
|
||||
if err != nil {
|
||||
|
||||
48
tests/authenticate.bats
Normal file
@@ -0,0 +1,48 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
@test "from-authenticate-cert-and-creds" {
|
||||
|
||||
buildah from --pull --name "alpine" --signature-policy ${TESTSDIR}/policy.json alpine
|
||||
run buildah push --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds testuser:testpassword alpine localhost:5000/my-alpine
|
||||
echo "$output"
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# This should fail
|
||||
run buildah push localhost:5000/my-alpine --signature-policy ${TESTSDIR}/policy.json --tls-verify=true
|
||||
[ "$status" -ne 0 ]
|
||||
|
||||
# This should fail
|
||||
run buildah from localhost:5000/my-alpine --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds baduser:badpassword
|
||||
[ "$status" -ne 0 ]
|
||||
|
||||
# This should work
|
||||
run buildah from localhost:5000/my-alpine --name "my-alpine" --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds testuser:testpassword
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# Create Dockerfile for bud tests
|
||||
FILE=./Dockerfile
|
||||
/bin/cat <<EOM >$FILE
|
||||
FROM localhost:5000/my-alpine
|
||||
EOM
|
||||
chmod +x $FILE
|
||||
|
||||
# Remove containers and images before bud tests
|
||||
buildah rm --all
|
||||
buildah rmi -f --all
|
||||
|
||||
# bud test bad password should fail
|
||||
run buildah bud -f ./Dockerfile --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds=testuser:badpassword
|
||||
[ "$status" -ne 0 ]
|
||||
|
||||
# bud test this should work
|
||||
run buildah bud -f ./Dockerfile --signature-policy ${TESTSDIR}/policy.json --tls-verify=false --creds=testuser:testpassword
|
||||
echo $status
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
# Clean up
|
||||
rm -f ./Dockerfile
|
||||
buildah rm -a
|
||||
buildah rmi -f --all
|
||||
}
|
||||
@@ -236,3 +236,29 @@ load helpers
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@test "bud-maintainer" {
|
||||
target=alpine-image
|
||||
buildah bud --signature-policy ${TESTSDIR}/policy.json -t ${target} ${TESTSDIR}/bud/maintainer
|
||||
run buildah --debug=false inspect --type=image --format '{{.Docker.Author}}' ${target}
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = kilroy ]
|
||||
run buildah --debug=false inspect --type=image --format '{{.OCIv1.Author}}' ${target}
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = kilroy ]
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@test "bud-unrecognized-instruction" {
|
||||
target=alpine-image
|
||||
run buildah bud --signature-policy ${TESTSDIR}/policy.json -t ${target} ${TESTSDIR}/bud/unrecognized
|
||||
[ "$status" -ne 0 ]
|
||||
[[ "$output" =~ BOGUS ]]
|
||||
buildah rmi $(buildah --debug=false images -q)
|
||||
run buildah --debug=false images -q
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
2
tests/bud/maintainer/Dockerfile
Normal file
@@ -0,0 +1,2 @@
|
||||
FROM alpine
|
||||
MAINTAINER kilroy
|
||||
2
tests/bud/unrecognized/Dockerfile
Normal file
@@ -0,0 +1,2 @@
|
||||
FROM alpine
|
||||
BOGUS nope-nope-nope
|
||||
124
tests/byid.bats
Normal file
@@ -0,0 +1,124 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
@test "from-by-id" {
|
||||
image=busybox
|
||||
|
||||
# Pull down the image, if we have to.
|
||||
cid=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json $image)
|
||||
[ $? -eq 0 ]
|
||||
[ $(wc -l <<< "$cid") -eq 1 ]
|
||||
buildah rm $cid
|
||||
|
||||
# Get the image's ID.
|
||||
run buildah --debug=false images -q $image
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
iid="$output"
|
||||
|
||||
# Use the image's ID to create a container.
|
||||
run buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json ${iid}
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
cid="$output"
|
||||
buildah rm $cid
|
||||
|
||||
# Use a truncated form of the image's ID to create a container.
|
||||
run buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json ${iid:0:6}
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
cid="$output"
|
||||
buildah rm $cid
|
||||
|
||||
buildah rmi $iid
|
||||
}
|
||||
|
||||
@test "inspect-by-id" {
|
||||
image=busybox
|
||||
|
||||
# Pull down the image, if we have to.
|
||||
cid=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json $image)
|
||||
[ $? -eq 0 ]
|
||||
[ $(wc -l <<< "$cid") -eq 1 ]
|
||||
buildah rm $cid
|
||||
|
||||
# Get the image's ID.
|
||||
run buildah --debug=false images -q $image
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
iid="$output"
|
||||
|
||||
# Use the image's ID to inspect it.
|
||||
run buildah --debug=false inspect --type=image ${iid}
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
|
||||
# Use a truncated copy of the image's ID to inspect it.
|
||||
run buildah --debug=false inspect --type=image ${iid:0:6}
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
|
||||
buildah rmi $iid
|
||||
}
|
||||
|
||||
@test "push-by-id" {
|
||||
for image in busybox kubernetes/pause ; do
|
||||
echo pulling/pushing image $image
|
||||
|
||||
TARGET=${TESTDIR}/subdir-$(basename $image)
|
||||
mkdir -p $TARGET $TARGET-truncated
|
||||
|
||||
# Pull down the image, if we have to.
|
||||
cid=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json $image)
|
||||
[ $? -eq 0 ]
|
||||
[ $(wc -l <<< "$cid") -eq 1 ]
|
||||
buildah rm $cid
|
||||
|
||||
# Get the image's ID.
|
||||
run buildah --debug=false images -q $IMAGE
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
iid="$output"
|
||||
|
||||
# Use the image's ID to push it.
|
||||
run buildah push --signature-policy ${TESTSDIR}/policy.json $iid dir:$TARGET
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
|
||||
# Use a truncated form of the image's ID to push it.
|
||||
run buildah push --signature-policy ${TESTSDIR}/policy.json ${iid:0:6} dir:$TARGET-truncated
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
|
||||
# Use the image's complete ID to remove it.
|
||||
buildah rmi $iid
|
||||
done
|
||||
}
|
||||
|
||||
@test "rmi-by-id" {
|
||||
image=busybox
|
||||
|
||||
# Pull down the image, if we have to.
|
||||
cid=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json $image)
|
||||
[ $? -eq 0 ]
|
||||
[ $(wc -l <<< "$cid") -eq 1 ]
|
||||
buildah rm $cid
|
||||
|
||||
# Get the image's ID.
|
||||
run buildah --debug=false images -q $image
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
iid="$output"
|
||||
|
||||
# Use a truncated copy of the image's ID to remove it.
|
||||
run buildah --debug=false rmi ${iid:0:6}
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
}
|
||||
@@ -111,3 +111,29 @@ load helpers
|
||||
[ "$status" -ne 0 ]
|
||||
buildah rm $cid
|
||||
}
|
||||
|
||||
@test "copy --chown" {
|
||||
mkdir -p ${TESTDIR}/subdir
|
||||
mkdir -p ${TESTDIR}/other-subdir
|
||||
createrandom ${TESTDIR}/subdir/randomfile
|
||||
createrandom ${TESTDIR}/subdir/other-randomfile
|
||||
createrandom ${TESTDIR}/randomfile
|
||||
createrandom ${TESTDIR}/other-subdir/randomfile
|
||||
createrandom ${TESTDIR}/other-subdir/other-randomfile
|
||||
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json alpine)
|
||||
root=$(buildah mount $cid)
|
||||
buildah config --workingdir / $cid
|
||||
buildah copy --chown 1:1 $cid ${TESTDIR}/randomfile
|
||||
buildah copy --chown root:1 $cid ${TESTDIR}/randomfile /randomfile2
|
||||
buildah copy --chown nobody $cid ${TESTDIR}/randomfile /randomfile3
|
||||
buildah copy --chown nobody:root $cid ${TESTDIR}/subdir /subdir
|
||||
test $(stat -c "%u:%g" $root/randomfile) = "1:1"
|
||||
test $(stat -c "%U:%g" $root/randomfile2) = "root:1"
|
||||
test $(stat -c "%U" $root/randomfile3) = "nobody"
|
||||
(cd $root/subdir/; for i in *; do test $(stat -c "%U:%G" $i) = "nobody:root"; done)
|
||||
buildah copy --chown root:root $cid ${TESTDIR}/other-subdir /subdir
|
||||
(cd $root/subdir/; for i in *randomfile; do test $(stat -c "%U:%G" $i) = "root:root"; done)
|
||||
test $(stat -c "%U:%G" $root/subdir) = "nobody:root"
|
||||
buildah rm $cid
|
||||
}
|
||||
|
||||
44
tests/digest.bats
Normal file
@@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
fromreftest() {
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json $1)
|
||||
pushdir=${TESTDIR}/fromreftest
|
||||
mkdir -p ${pushdir}/{1,2,3}
|
||||
buildah push --signature-policy ${TESTSDIR}/policy.json $1 dir:${pushdir}/1
|
||||
buildah commit --signature-policy ${TESTSDIR}/policy.json $cid new-image
|
||||
buildah push --signature-policy ${TESTSDIR}/policy.json new-image dir:${pushdir}/2
|
||||
buildah rmi new-image
|
||||
buildah commit --signature-policy ${TESTSDIR}/policy.json $cid dir:${pushdir}/3
|
||||
buildah rm $cid
|
||||
rm -fr ${pushdir}
|
||||
}
|
||||
|
||||
@test "from-by-digest-s1" {
|
||||
fromreftest kubernetes/pause@sha256:f8cd50c5a287dd8c5f226cf69c60c737d34ed43726c14b8a746d9de2d23eda2b
|
||||
}
|
||||
|
||||
@test "from-by-digest-s1-a-discarded-layer" {
|
||||
fromreftest docker/whalesay@sha256:178598e51a26abbc958b8a2e48825c90bc22e641de3d31e18aaf55f3258ba93b
|
||||
}
|
||||
|
||||
@test "from-by-tag-s1" {
|
||||
fromreftest kubernetes/pause:go
|
||||
}
|
||||
|
||||
@test "from-by-repo-only-s1" {
|
||||
fromreftest kubernetes/pause
|
||||
}
|
||||
|
||||
@test "from-by-digest-s2" {
|
||||
fromreftest alpine@sha256:e9cec9aec697d8b9d450edd32860ecd363f2f3174c8338beb5f809422d182c63
|
||||
}
|
||||
|
||||
@test "from-by-tag-s2" {
|
||||
fromreftest alpine:2.6
|
||||
}
|
||||
|
||||
@test "from-by-repo-only-s2" {
|
||||
fromreftest alpine
|
||||
}
|
||||
@@ -12,12 +12,10 @@ load helpers
|
||||
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json dir:${elsewhere})
|
||||
buildah rm $cid
|
||||
buildah rmi ${elsewhere}
|
||||
[ "$cid" = elsewhere-img-working-container ]
|
||||
|
||||
cid=$(buildah from --pull-always --signature-policy ${TESTSDIR}/policy.json dir:${elsewhere})
|
||||
buildah rm $cid
|
||||
buildah rmi ${elsewhere}
|
||||
[ "$cid" = `basename ${elsewhere}`-working-container ]
|
||||
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json scratch)
|
||||
@@ -26,12 +24,10 @@ load helpers
|
||||
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json dir:${elsewhere})
|
||||
buildah rm $cid
|
||||
buildah rmi ${elsewhere}
|
||||
[ "$cid" = elsewhere-img-working-container ]
|
||||
|
||||
cid=$(buildah from --pull-always --signature-policy ${TESTSDIR}/policy.json dir:${elsewhere})
|
||||
buildah rm $cid
|
||||
buildah rmi ${elsewhere}
|
||||
[ "$cid" = `basename ${elsewhere}`-working-container ]
|
||||
}
|
||||
|
||||
@@ -73,7 +69,6 @@ load helpers
|
||||
}
|
||||
|
||||
@test "from-authenticate-cert-and-creds" {
|
||||
|
||||
mkdir -p ${TESTDIR}/auth
|
||||
# Create creds and store in ${TESTDIR}/auth/htpasswd
|
||||
# docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > ${TESTDIR}/auth/htpasswd
|
||||
@@ -112,3 +107,24 @@ load helpers
|
||||
# buildah rm $ctrid
|
||||
# buildah rmi -f $(buildah --debug=false images -q)
|
||||
}
|
||||
|
||||
@test "from-tagged-image" {
|
||||
# Github #396: Make sure the container name starts with the correct image even when it's tagged.
|
||||
cid=$(buildah from --pull=false --signature-policy ${TESTSDIR}/policy.json scratch)
|
||||
buildah commit --signature-policy ${TESTSDIR}/policy.json "$cid" scratch2
|
||||
buildah rm $cid
|
||||
buildah tag scratch2 scratch3
|
||||
cid=$(buildah from --signature-policy ${TESTSDIR}/policy.json scratch3)
|
||||
[ "$cid" == scratch3-working-container ]
|
||||
buildah rm ${cid}
|
||||
buildah rmi scratch2 scratch3
|
||||
|
||||
# Github https://github.com/projectatomic/buildah/issues/396#issuecomment-360949396
|
||||
cid=$(buildah from --pull=true --signature-policy ${TESTSDIR}/policy.json alpine)
|
||||
buildah rm $cid
|
||||
buildah tag alpine alpine2
|
||||
cid=$(buildah from --signature-policy ${TESTSDIR}/policy.json docker.io/alpine2)
|
||||
[ "$cid" == alpine2-working-container ]
|
||||
buildah rm ${cid}
|
||||
buildah rmi alpine alpine2
|
||||
}
|
||||
|
||||
@@ -42,7 +42,7 @@ function createrandom() {
|
||||
}
|
||||
|
||||
function buildah() {
|
||||
${BUILDAH_BINARY} --debug --root ${TESTDIR}/root --runroot ${TESTDIR}/runroot --storage-driver ${STORAGE_DRIVER} "$@"
|
||||
${BUILDAH_BINARY} --debug --registries-conf ${TESTSDIR}/registries.conf --root ${TESTDIR}/root --runroot ${TESTDIR}/runroot --storage-driver ${STORAGE_DRIVER} "$@"
|
||||
}
|
||||
|
||||
function imgtype() {
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"strings"
|
||||
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@@ -33,7 +34,9 @@ func main() {
|
||||
policy := flag.String("signature-policy", "", "signature policy file")
|
||||
mtype := flag.String("expected-manifest-type", buildah.OCIv1ImageManifest, "expected manifest type")
|
||||
showm := flag.Bool("show-manifest", false, "output the manifest JSON")
|
||||
rebuildm := flag.Bool("rebuild-manifest", false, "rebuild the manifest JSON")
|
||||
showc := flag.Bool("show-config", false, "output the configuration JSON")
|
||||
rebuildc := flag.Bool("rebuild-config", false, "rebuild the configuration JSON")
|
||||
flag.Parse()
|
||||
logrus.SetLevel(logrus.ErrorLevel)
|
||||
if debug != nil && *debug {
|
||||
@@ -79,6 +82,7 @@ func main() {
|
||||
logrus.Errorf("error opening storage: %v", err)
|
||||
return
|
||||
}
|
||||
is.Transport.SetStore(store)
|
||||
|
||||
errors := false
|
||||
defer func() {
|
||||
@@ -88,6 +92,7 @@ func main() {
|
||||
}
|
||||
}()
|
||||
for _, image := range args {
|
||||
var ref types.ImageReference
|
||||
oImage := v1.Image{}
|
||||
dImage := docker.V2Image{}
|
||||
oManifest := v1.Manifest{}
|
||||
@@ -97,9 +102,13 @@ func main() {
|
||||
|
||||
ref, err := is.Transport.ParseStoreReference(store, image)
|
||||
if err != nil {
|
||||
logrus.Errorf("error parsing reference %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
ref2, err2 := alltransports.ParseImageName(image)
|
||||
if err2 != nil {
|
||||
logrus.Errorf("error parsing reference %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
ref = ref2
|
||||
}
|
||||
|
||||
img, err := ref.NewImage(systemContext)
|
||||
@@ -161,6 +170,66 @@ func main() {
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
switch manifestType {
|
||||
case buildah.OCIv1ImageManifest:
|
||||
if rebuildm != nil && *rebuildm {
|
||||
err = json.Unmarshal(manifest, &oManifest)
|
||||
if err != nil {
|
||||
logrus.Errorf("error parsing manifest from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
manifest, err = json.Marshal(oManifest)
|
||||
if err != nil {
|
||||
logrus.Errorf("error rebuilding manifest from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
if rebuildc != nil && *rebuildc {
|
||||
err = json.Unmarshal(config, &oImage)
|
||||
if err != nil {
|
||||
logrus.Errorf("error parsing config from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
config, err = json.Marshal(oImage)
|
||||
if err != nil {
|
||||
logrus.Errorf("error rebuilding config from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
case buildah.Dockerv2ImageManifest:
|
||||
if rebuildm != nil && *rebuildm {
|
||||
err = json.Unmarshal(manifest, &dManifest)
|
||||
if err != nil {
|
||||
logrus.Errorf("error parsing manifest from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
manifest, err = json.Marshal(dManifest)
|
||||
if err != nil {
|
||||
logrus.Errorf("error rebuilding manifest from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
if rebuildc != nil && *rebuildc {
|
||||
err = json.Unmarshal(config, &dImage)
|
||||
if err != nil {
|
||||
logrus.Errorf("error parsing config from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
config, err = json.Marshal(dImage)
|
||||
if err != nil {
|
||||
logrus.Errorf("error rebuilding config from %q: %v", image, err)
|
||||
errors = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
if expectedConfigType != "" && configType != expectedConfigType {
|
||||
logrus.Errorf("expected config type %q in %q, got %q", expectedConfigType, image, configType)
|
||||
errors = true
|
||||
|
||||
@@ -26,3 +26,15 @@ load helpers
|
||||
[ "$status" -eq 0 ]
|
||||
[ "$output" != "" ]
|
||||
}
|
||||
|
||||
@test "HTML escaped" {
|
||||
cid=$(buildah from --pull=false --signature-policy ${TESTSDIR}/policy.json scratch)
|
||||
buildah config --label maintainer="Darth Vader <dvader@darkside.io>" ${cid}
|
||||
buildah commit --signature-policy ${TESTSDIR}/policy.json $cid darkside-image
|
||||
buildah rm ${cid}
|
||||
output=$(buildah inspect --type image darkside-image)
|
||||
[ $(output | grep "u003" | wc -l) -eq 0 ]
|
||||
output=$(buildah inspect --type image darkside-image | grep "u003" | wc -l)
|
||||
[ "$output" -ne 0 ]
|
||||
buildah rmi darkside-image
|
||||
}
|
||||
|
||||
@@ -8,11 +8,15 @@ load helpers
|
||||
cid=$(buildah from --pull=false --signature-policy ${TESTSDIR}/policy.json ${source})
|
||||
for format in "" docker oci ; do
|
||||
mkdir -p ${TESTDIR}/committed${format:+.${format}}
|
||||
buildah commit ${format:+--format ${format}} --reference-time ${TESTDIR}/reference-time-file --signature-policy ${TESTSDIR}/policy.json "$cid" scratch-image${format:+-${format}}
|
||||
buildah commit ${format:+--format ${format}} --reference-time ${TESTDIR}/reference-time-file --signature-policy ${TESTSDIR}/policy.json "$cid" dir:${TESTDIR}/committed${format:+.${format}}
|
||||
# Force no compression to generate what we push.
|
||||
buildah commit -D ${format:+--format ${format}} --reference-time ${TESTDIR}/reference-time-file --signature-policy ${TESTSDIR}/policy.json "$cid" scratch-image${format:+-${format}}
|
||||
buildah commit -D ${format:+--format ${format}} --reference-time ${TESTDIR}/reference-time-file --signature-policy ${TESTSDIR}/policy.json "$cid" dir:${TESTDIR}/committed${format:+.${format}}
|
||||
mkdir -p ${TESTDIR}/pushed${format:+.${format}}
|
||||
buildah push --signature-policy ${TESTSDIR}/policy.json scratch-image${format:+-${format}} dir:${TESTDIR}/pushed${format:+.${format}}
|
||||
diff -u ${TESTDIR}/committed${format:+.${format}}/manifest.json ${TESTDIR}/pushed${format:+.${format}}/manifest.json
|
||||
# Reencode the manifest to lose variations due to different encoders or definitions of structures.
|
||||
imgtype -expected-manifest-type "*" -rebuild-manifest -show-manifest dir:${TESTDIR}/committed${format:+.${format}} > ${TESTDIR}/manifest.committed${format:+.${format}}
|
||||
imgtype -expected-manifest-type "*" -rebuild-manifest -show-manifest dir:${TESTDIR}/pushed${format:+.${format}} > ${TESTDIR}/manifest.pushed${format:+.${format}}
|
||||
diff -u ${TESTDIR}/manifest.committed${format:+.${format}} ${TESTDIR}/manifest.pushed${format:+.${format}}
|
||||
[ "$output" = "" ]
|
||||
done
|
||||
buildah rm "$cid"
|
||||
@@ -38,3 +42,14 @@ load helpers
|
||||
buildah rmi alpine
|
||||
rm -rf my-dir
|
||||
}
|
||||
|
||||
@test "push with imageid" {
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json alpine)
|
||||
imageid=$(buildah images -q)
|
||||
run buildah push --signature-policy ${TESTSDIR}/policy.json $imageid dir:my-dir
|
||||
echo "$output"
|
||||
[ "$status" -eq 0 ]
|
||||
buildah rm "$cid"
|
||||
buildah rmi alpine
|
||||
rm -rf my-dir
|
||||
}
|
||||
|
||||
57
tests/registries.bats
Normal file
@@ -0,0 +1,57 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
@test "registries" {
|
||||
registrypair() {
|
||||
image=$1
|
||||
imagename=$2
|
||||
|
||||
# Clean up.
|
||||
for id in $(buildah --debug=false containers -q) ; do
|
||||
buildah rm ${id}
|
||||
done
|
||||
for id in $(buildah --debug=false images -q) ; do
|
||||
buildah rmi ${id}
|
||||
done
|
||||
|
||||
# Create a container by specifying the image with one name.
|
||||
buildah from --pull --signature-policy ${TESTSDIR}/policy.json $image
|
||||
|
||||
# Create a container by specifying the image with another name.
|
||||
buildah from --pull --signature-policy ${TESTSDIR}/policy.json $imagename
|
||||
|
||||
# Get their image IDs. They should be the same one.
|
||||
lastid=
|
||||
for cid in $(buildah --debug=false containers -q) ; do
|
||||
run buildah --debug=false inspect -f "{{.FromImageID}}" $cid
|
||||
echo "$output"
|
||||
[ $status -eq 0 ]
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
if [ "$lastid" != "" ] ; then
|
||||
[ "$output" = "$lastid" ]
|
||||
fi
|
||||
lastid="$output"
|
||||
done
|
||||
|
||||
# A quick bit of troubleshooting help.
|
||||
run buildah images
|
||||
echo "$output"
|
||||
[ "$iid" = "$nameiid" ]
|
||||
|
||||
# Clean up.
|
||||
for id in $(buildah --debug=false containers -q) ; do
|
||||
buildah rm ${id}
|
||||
done
|
||||
for id in $(buildah --debug=false images -q) ; do
|
||||
buildah rmi ${id}
|
||||
done
|
||||
}
|
||||
# Test with pairs of short and fully-qualified names that should be the same image.
|
||||
registrypair busybox docker.io/busybox
|
||||
registrypair docker.io/busybox busybox
|
||||
registrypair busybox docker.io/library/busybox
|
||||
registrypair docker.io/library/busybox busybox
|
||||
registrypair fedora-minimal registry.fedoraproject.org/fedora-minimal
|
||||
registrypair registry.fedoraproject.org/fedora-minimal fedora-minimal
|
||||
}
|
||||
25
tests/registries.conf
Normal file
@@ -0,0 +1,25 @@
|
||||
# This is a system-wide configuration file used to
|
||||
# keep track of registries for various container backends.
|
||||
# It adheres to TOML format and does not support recursive
|
||||
# lists of registries.
|
||||
|
||||
# The default location for this configuration file is /etc/containers/registries.conf.
|
||||
|
||||
# The only valid categories are: 'registries.search', 'registries.insecure',
|
||||
# and 'registries.block'.
|
||||
|
||||
[registries.search]
|
||||
registries = ['docker.io', 'registry.fedoraproject.org', 'registry.access.redhat.com']
|
||||
|
||||
# If you need to access insecure registries, add the registry's fully-qualified name.
|
||||
# An insecure registry is one that does not have a valid SSL certificate or only does HTTP.
|
||||
[registries.insecure]
|
||||
registries = []
|
||||
|
||||
|
||||
# If you need to block pull access from a registry, uncomment the section below
|
||||
# and add the registries fully-qualified name.
|
||||
#
|
||||
# Docker only
|
||||
[registries.block]
|
||||
registries = []
|
||||
12
tests/rm.bats
Normal file
@@ -0,0 +1,12 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
@test "remove multiple containers errors" {
|
||||
run buildah --debug=false rm mycontainer1 mycontainer2 mycontainer3
|
||||
[ "${lines[0]}" == "error removing container \"mycontainer1\": error reading build container: container not known" ]
|
||||
[ "${lines[1]}" == "error removing container \"mycontainer2\": error reading build container: container not known" ]
|
||||
[ "${lines[2]}" == "error removing container \"mycontainer3\": error reading build container: container not known" ]
|
||||
[ $(wc -l <<< "$output") -eq 3 ]
|
||||
[ "${status}" -eq 1 ]
|
||||
}
|
||||
80
tests/rmi.bats
Normal file
@@ -0,0 +1,80 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
@test "remove one image" {
|
||||
cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json alpine)
|
||||
buildah rm "$cid"
|
||||
buildah rmi alpine
|
||||
run buildah --debug=false images -q
|
||||
[ "$output" == "" ]
|
||||
}
|
||||
|
||||
@test "remove multiple images" {
|
||||
cid2=$(buildah from --signature-policy ${TESTSDIR}/policy.json alpine)
|
||||
cid3=$(buildah from --signature-policy ${TESTSDIR}/policy.json busybox)
|
||||
run buildah rmi alpine busybox
|
||||
[ "$status" -eq 1 ]
|
||||
run buildah --debug=false images -q
|
||||
[ "$output" != "" ]
|
||||
|
||||
buildah rmi -f alpine busybox
|
||||
run buildah --debug=false images -q
|
||||
[ "$output" == "" ]
|
||||
}
|
||||
|
||||
@test "remove all images" {
|
||||
cid1=$(buildah from --signature-policy ${TESTSDIR}/policy.json scratch)
|
||||
cid2=$(buildah from --signature-policy ${TESTSDIR}/policy.json alpine)
|
||||
cid3=$(buildah from --signature-policy ${TESTSDIR}/policy.json busybox)
|
||||
buildah rmi -a -f
|
||||
run buildah --debug=false images -q
|
||||
[ "$output" == "" ]
|
||||
|
||||
cid1=$(buildah from --signature-policy ${TESTSDIR}/policy.json scratch)
|
||||
cid2=$(buildah from --signature-policy ${TESTSDIR}/policy.json alpine)
|
||||
cid3=$(buildah from --signature-policy ${TESTSDIR}/policy.json busybox)
|
||||
run buildah rmi --all
|
||||
[ "$status" -eq 1 ]
|
||||
run buildah --debug=false images -q
|
||||
[ "$output" != "" ]
|
||||
|
||||
buildah rmi --all --force
|
||||
run buildah --debug=false images -q
|
||||
[ "$output" == "" ]
|
||||
}
|
||||
|
||||
@test "use prune to remove dangling images" {
|
||||
createrandom ${TESTDIR}/randomfile
|
||||
createrandom ${TESTDIR}/other-randomfile
|
||||
|
||||
cid=$(buildah from --signature-policy ${TESTSDIR}/policy.json busybox)
|
||||
|
||||
run buildah --debug=false images -q
|
||||
[ $(wc -l <<< "$output") -eq 1 ]
|
||||
|
||||
root=$(buildah mount $cid)
|
||||
cp ${TESTDIR}/randomfile $root/randomfile
|
||||
buildah unmount $cid
|
||||
buildah commit --signature-policy ${TESTSDIR}/policy.json $cid containers-storage:new-image
|
||||
|
||||
run buildah --debug=false images -q
|
||||
[ $(wc -l <<< "$output") -eq 2 ]
|
||||
|
||||
root=$(buildah mount $cid)
|
||||
cp ${TESTDIR}/other-randomfile $root/other-randomfile
|
||||
buildah unmount $cid
|
||||
buildah commit --signature-policy ${TESTSDIR}/policy.json $cid containers-storage:new-image
|
||||
|
||||
run buildah --debug=false images -q
|
||||
[ $(wc -l <<< "$output") -eq 3 ]
|
||||
|
||||
buildah rmi --prune
|
||||
|
||||
run buildah --debug=false images -q
|
||||
[ $(wc -l <<< "$output") -eq 2 ]
|
||||
|
||||
buildah rmi --all --force
|
||||
run buildah --debug=false images -q
|
||||
[ "$output" == "" ]
|
||||
}
|
||||
@@ -8,7 +8,7 @@ load helpers
|
||||
fi
|
||||
|
||||
# Build a container to use for building the binaries.
|
||||
image=registry.fedoraproject.org/fedora:26
|
||||
image=registry.fedoraproject.org/fedora:27
|
||||
cid=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json $image)
|
||||
root=$(buildah --debug=false mount $cid)
|
||||
commit=$(git log --format=%H -n 1)
|
||||
@@ -27,7 +27,7 @@ load helpers
|
||||
buildah --debug=false run $cid -- rpmbuild --define "_topdir /rpmbuild" -ba /rpmbuild/SPECS/buildah.spec
|
||||
|
||||
# Build a second new container.
|
||||
cid2=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json registry.fedoraproject.org/fedora:26)
|
||||
cid2=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json registry.fedoraproject.org/fedora:27)
|
||||
root2=$(buildah --debug=false mount $cid2)
|
||||
|
||||
# Copy the binary packages from the first container to the second one, and build a list of
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
# /bin/bash -v test_buildah_authentication.sh
|
||||
|
||||
########
|
||||
# System setup - dir for creds and start docker
|
||||
# System setup - Create dir for creds and start Docker
|
||||
########
|
||||
mkdir -p /root/auth
|
||||
systemctl restart docker
|
||||
@@ -97,22 +97,27 @@ buildah images
|
||||
docker logout localhost:5000
|
||||
|
||||
########
|
||||
# Push using only certs, this should fail.
|
||||
# Push using only certs, this should FAIL.
|
||||
########
|
||||
buildah push --cert-dir /root/auth --tls-verify=true alpine docker://localhost:5000/my-alpine
|
||||
|
||||
########
|
||||
# Push using creds, certs and no transport, this should work.
|
||||
# Push using creds, certs and no transport (docker://), this should work.
|
||||
########
|
||||
buildah push --cert-dir ~/auth --tls-verify=true --creds=testuser:testpassword alpine localhost:5000/my-alpine
|
||||
|
||||
########
|
||||
# No creds anywhere, only the certificate, this should fail.
|
||||
# Push using a bad password , this should FAIL.
|
||||
########
|
||||
buildah push --cert-dir ~/auth --tls-verify=true --creds=testuser:badpassword alpine localhost:5000/my-alpine
|
||||
|
||||
########
|
||||
# No creds anywhere, only the certificate, this should FAIL.
|
||||
########
|
||||
buildah from localhost:5000/my-alpine --cert-dir /root/auth --tls-verify=true
|
||||
|
||||
########
|
||||
# Log in with creds, this should work
|
||||
# From with creds and certs, this should work
|
||||
########
|
||||
ctrid=$(buildah from localhost:5000/my-alpine --cert-dir /root/auth --tls-verify=true --creds=testuser:testpassword)
|
||||
|
||||
@@ -154,7 +159,7 @@ buildah images
|
||||
########
|
||||
|
||||
########
|
||||
# No credentials, this should fail.
|
||||
# No credentials, this should FAIL.
|
||||
########
|
||||
buildah commit --cert-dir /root/auth --tls-verify=true alpine-working-container docker://localhost:5000/my-commit-alpine
|
||||
|
||||
@@ -163,10 +168,51 @@ buildah commit --cert-dir /root/auth --tls-verify=true alpine-working-container
|
||||
########
|
||||
buildah commit --cert-dir /root/auth --tls-verify=true --creds=testuser:testpassword alpine-working-container docker://localhost:5000/my-commit-alpine
|
||||
|
||||
########
|
||||
# Use bad password on from/pull, this should FAIL
|
||||
########
|
||||
buildah from localhost:5000/my-commit-alpine --pull-always --cert-dir /root/auth --tls-verify=true --creds=testuser:badpassword
|
||||
|
||||
########
|
||||
# Pull the new image that we just commited
|
||||
########
|
||||
buildah from localhost:5000/my-commit-alpine --cert-dir /root/auth --tls-verify=true --creds=testuser:testpassword
|
||||
buildah from localhost:5000/my-commit-alpine --pull-always --cert-dir /root/auth --tls-verify=true --creds=testuser:testpassword
|
||||
|
||||
########
|
||||
# Show stuff
|
||||
########
|
||||
docker ps --all
|
||||
|
||||
docker images
|
||||
|
||||
buildah containers
|
||||
|
||||
buildah images
|
||||
|
||||
########
|
||||
# Create Dockerfile
|
||||
########
|
||||
FILE=./Dockerfile
|
||||
/bin/cat <<EOM >$FILE
|
||||
FROM localhost:5000/my-commit-alpine
|
||||
EOM
|
||||
chmod +x $FILE
|
||||
|
||||
########
|
||||
# Clean up Buildah
|
||||
########
|
||||
buildah rm --all
|
||||
buildah rmi -f $(buildah --debug=false images -q)
|
||||
|
||||
########
|
||||
# Try Buildah bud with creds but no auth, this should FAIL
|
||||
########
|
||||
buildah bud -f ./Dockerfile --tls-verify=true --creds=testuser:testpassword
|
||||
|
||||
########
|
||||
# Try Buildah bud with creds and auth, this should work
|
||||
########
|
||||
buildah bud -f ./Dockerfile --cert-dir /root/auth --tls-verify=true --creds=testuser:testpassword
|
||||
|
||||
########
|
||||
# Show stuff
|
||||
@@ -182,6 +228,9 @@ buildah images
|
||||
########
|
||||
# Clean up
|
||||
########
|
||||
read -p "Press enter to continue and clean up all"
|
||||
|
||||
rm -f ./Dockerfile
|
||||
rm -rf ${TESTDIR}/auth
|
||||
docker rm -f $(docker ps --all -q)
|
||||
docker rmi -f $(docker images -q)
|
||||
|
||||
@@ -74,9 +74,9 @@ scratchmnt=$(buildah mount $newcontainer)
|
||||
echo $scratchmnt
|
||||
|
||||
########
|
||||
# Install Fedora 26 bash and coreutils
|
||||
# Install Fedora 27 bash and coreutils
|
||||
########
|
||||
dnf install --installroot $scratchmnt --release 26 bash coreutils --setopt install_weak_deps=false -y
|
||||
dnf install --installroot $scratchmnt --release 27 bash coreutils --setopt install_weak_deps=false -y
|
||||
|
||||
########
|
||||
# Check /usr/bin on the new container
|
||||
@@ -91,7 +91,7 @@ FILE=./runecho.sh
|
||||
#!/bin/bash
|
||||
for i in {1..9};
|
||||
do
|
||||
echo "This is a new container from ipbabble [" $i "]"
|
||||
echo "This is a new container from ipbabble [" \$i "]"
|
||||
done
|
||||
EOM
|
||||
chmod +x $FILE
|
||||
@@ -107,7 +107,7 @@ buildah run $newcontainer
|
||||
# Add configuration information
|
||||
########
|
||||
buildah config --created-by "ipbabble" $newcontainer
|
||||
buildah config --author "wgh at redhat.com @ipbabble" --label name=fedora26-bashecho $newcontainer
|
||||
buildah config --author "wgh at redhat.com @ipbabble" --label name=fedora27-bashecho $newcontainer
|
||||
|
||||
########
|
||||
# Inspect the container, verifying above was put into it
|
||||
@@ -125,12 +125,12 @@ buildah unmount $newcontainer
|
||||
buildah commit $newcontainer fedora-bashecho
|
||||
|
||||
########
|
||||
# Check the images there should be a fedora-basecho:latest image
|
||||
# Check the images there should be a fedora-bashecho:latest image
|
||||
########
|
||||
buildah images
|
||||
|
||||
########
|
||||
# Inspect the fedora-baseecho image
|
||||
# Inspect the fedora-bashecho image
|
||||
########
|
||||
buildah inspect --type=image fedora-bashecho
|
||||
|
||||
@@ -146,14 +146,14 @@ dnf -y install docker
|
||||
systemctl start docker
|
||||
|
||||
########
|
||||
# Push fedora-basecho to the Docker daemon
|
||||
# Push fedora-bashecho to the Docker daemon
|
||||
########
|
||||
buildah push fedora-bashecho docker-daemon:fedora-bashecho:latest
|
||||
|
||||
########
|
||||
# Run fedora-bashecho from Docker
|
||||
########
|
||||
docker run fedoara-baseecho
|
||||
docker run fedora-bashecho
|
||||
|
||||
########
|
||||
# Time to remove Docker
|
||||
|
||||
124
tests/test_buildah_build_rpm.sh
Executable file
@@ -0,0 +1,124 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# test_buildah_build_rpm.sh
|
||||
#
|
||||
# Meant to run on a freshly installed VM.
|
||||
# Installs the latest Git and Buildah and then
|
||||
# Builds and installs Buildah's RPM in a Buidah Container.
|
||||
# The baseline test is then run on this vm and then the
|
||||
# newly created BUILDAH rpm is installed and the baseline
|
||||
# test is rerun.
|
||||
#
|
||||
|
||||
########
|
||||
# Setup
|
||||
########
|
||||
IMAGE=registry.fedoraproject.org/fedora
|
||||
SBOX=/tmp/sandbox
|
||||
PACKAGES=/tmp/packages
|
||||
mkdir -p ${SBOX}/buildah
|
||||
GITROOT=${SBOX}/buildah
|
||||
TESTSDIR=${GITROOT}/tests
|
||||
|
||||
# Change packager as appropriate for the platform
|
||||
PACKAGER=dnf
|
||||
|
||||
${PACKAGER} install -y git
|
||||
${PACKAGER} install -y buildah
|
||||
|
||||
########
|
||||
# Clone buildah from GitHub.com
|
||||
########
|
||||
cd $SBOX
|
||||
git clone https://github.com/projectatomic/buildah.git
|
||||
cd $GITROOT
|
||||
|
||||
########
|
||||
# Build a container to use for building the binaries.
|
||||
########
|
||||
CTRID=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json $IMAGE)
|
||||
ROOTMNT=$(buildah --debug=false mount $CTRID)
|
||||
COMMIT=$(git log --format=%H -n 1)
|
||||
SHORTCOMMIT=$(echo ${COMMIT} | cut -c-7)
|
||||
mkdir -p ${ROOTMNT}/rpmbuild/{SOURCES,SPECS}
|
||||
|
||||
########
|
||||
# Build the tarball.
|
||||
########
|
||||
(git archive --format tar.gz --prefix=buildah-${COMMIT}/ ${COMMIT}) > ${ROOTMNT}/rpmbuild/SOURCES/buildah-${SHORTCOMMIT}.tar.gz
|
||||
|
||||
########
|
||||
# Update the .spec file with the commit ID.
|
||||
########
|
||||
sed s:REPLACEWITHCOMMITID:${COMMIT}:g ${GITROOT}/contrib/rpm/buildah.spec > ${ROOTMNT}/rpmbuild/SPECS/buildah.spec
|
||||
|
||||
########
|
||||
# Install build dependencies and build binary packages.
|
||||
########
|
||||
buildah --debug=false run $CTRID -- dnf -y install 'dnf-command(builddep)' rpm-build
|
||||
buildah --debug=false run $CTRID -- dnf -y builddep --spec /rpmbuild/SPECS/buildah.spec
|
||||
buildah --debug=false run $CTRID -- rpmbuild --define "_topdir /rpmbuild" -ba /rpmbuild/SPECS/buildah.spec
|
||||
|
||||
########
|
||||
# Build a second new container.
|
||||
########
|
||||
CTRID2=$(buildah --debug=false from --pull --signature-policy ${TESTSDIR}/policy.json $IMAGE)
|
||||
ROOTMNT2=$(buildah --debug=false mount $CTRID2)
|
||||
|
||||
########
|
||||
# Copy the binary packages from the first container to the second one and to
|
||||
# /tmp. Also build a list of their filenames.
|
||||
########
|
||||
rpms=
|
||||
mkdir -p ${ROOTMNT2}/${PACKAGES}
|
||||
mkdir -p ${PACKAGES}
|
||||
for rpm in ${ROOTMNT}/rpmbuild/RPMS/*/*.rpm ; do
|
||||
cp $rpm ${ROOTMNT2}/${PACKAGES}
|
||||
cp $rpm ${PACKAGES}
|
||||
rpms="$rpms "${PACKAGES}/$(basename $rpm)
|
||||
done
|
||||
|
||||
########
|
||||
# Install the binary packages into the second container.
|
||||
########
|
||||
buildah --debug=false run $CTRID2 -- dnf -y install $rpms
|
||||
|
||||
########
|
||||
# Run the binary package and compare its self-identified version to the one we tried to build.
|
||||
########
|
||||
id=$(buildah --debug=false run $CTRID2 -- buildah version | awk '/^Git Commit:/ { print $NF }')
|
||||
bv=$(buildah --debug=false run $CTRID2 -- buildah version | awk '/^Version:/ { print $NF }')
|
||||
rv=$(buildah --debug=false run $CTRID2 -- rpm -q --queryformat '%{version}' buildah)
|
||||
echo "short commit: $SHORTCOMMIT"
|
||||
echo "id: $id"
|
||||
echo "buildah version: $bv"
|
||||
echo "buildah rpm version: $rv"
|
||||
test $SHORTCOMMIT = $id
|
||||
test $bv = $rv
|
||||
|
||||
########
|
||||
# Clean up Buildah
|
||||
########
|
||||
buildah rm $(buildah containers -q)
|
||||
buildah rmi -f $(buildah --debug=false images -q)
|
||||
|
||||
########
|
||||
# Kick off baseline testing against the installed Buildah
|
||||
########
|
||||
/bin/bash -v ${TESTSDIR}/test_buildah_baseline.sh
|
||||
|
||||
########
|
||||
# Install the Buildah we just built locally and run
|
||||
# the baseline tests again.
|
||||
########
|
||||
${PACKAGER} -y install ${PACKAGES}/*.rpm
|
||||
/bin/bash -v ${TESTSDIR}/test_buildah_baseline.sh
|
||||
|
||||
########
|
||||
# Clean up
|
||||
########
|
||||
rm -rf ${SBOX}
|
||||
rm -rf ${PACKAGES}
|
||||
buildah rm $(buildah containers -q)
|
||||
buildah rmi -f $(buildah images -q)
|
||||
${PACKAGER} remove -y buildah
|
||||
@@ -8,6 +8,6 @@ if ! which git-validation > /dev/null 2> /dev/null ; then
|
||||
fi
|
||||
if test "$TRAVIS" != true ; then
|
||||
#GITVALIDATE_EPOCH=":/git-validation epoch"
|
||||
GITVALIDATE_EPOCH="b1bb73e01c9bf0b1b75e50a2d1947b14a8174eee"
|
||||
GITVALIDATE_EPOCH="bf40000e72b351067ebae7b77d212a200f9ce051"
|
||||
fi
|
||||
exec git-validation -q -run DCO,short-subject ${GITVALIDATE_EPOCH:+-range "${GITVALIDATE_EPOCH}""..${GITVALIDATE_TIP:-@}"} ${GITVALIDATE_FLAGS}
|
||||
|
||||
@@ -11,6 +11,7 @@ exec gometalinter.v1 \
|
||||
--enable-gc \
|
||||
--exclude='error return value not checked.*(Close|Log|Print).*\(errcheck\)$' \
|
||||
--exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$' \
|
||||
--exclude='declaration of.*err.*shadows declaration.*\(vetshadow\)$'\
|
||||
--exclude='duplicate of.*_test.go.*\(dupl\)$' \
|
||||
--exclude='vendor\/.*' \
|
||||
--disable=gotype \
|
||||
|
||||
138
util/util.go
@@ -1,27 +1,125 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/pkg/sysregistries"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// ExpandTags takes unqualified names, parses them as image names, and returns
|
||||
// the fully expanded result, including a tag.
|
||||
func ExpandTags(tags []string) ([]string, error) {
|
||||
expanded := []string{}
|
||||
for _, tag := range tags {
|
||||
name, err := reference.ParseNormalizedNamed(tag)
|
||||
const (
|
||||
minimumTruncatedIDLength = 3
|
||||
)
|
||||
|
||||
var (
|
||||
// RegistryDefaultPathPrefix contains a per-registry listing of default prefixes
|
||||
// to prepend to image names that only contain a single path component.
|
||||
RegistryDefaultPathPrefix = map[string]string{
|
||||
"index.docker.io": "library",
|
||||
"docker.io": "library",
|
||||
}
|
||||
)
|
||||
|
||||
// ResolveName checks if name is a valid image name, and if that name doesn't include a domain
|
||||
// portion, returns a list of the names which it might correspond to in the registries.
|
||||
func ResolveName(name string, firstRegistry string, sc *types.SystemContext, store storage.Store) []string {
|
||||
if name == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Maybe it's a truncated image ID. Don't prepend a registry name, then.
|
||||
if len(name) >= minimumTruncatedIDLength {
|
||||
if img, err := store.Image(name); err == nil && img != nil && strings.HasPrefix(img.ID, name) {
|
||||
// It's a truncated version of the ID of an image that's present in local storage;
|
||||
// we need to expand the ID.
|
||||
return []string{img.ID}
|
||||
}
|
||||
}
|
||||
|
||||
// If the image name already included a domain component, we're done.
|
||||
named, err := reference.ParseNormalizedNamed(name)
|
||||
if err != nil {
|
||||
return []string{name}
|
||||
}
|
||||
if named.String() == name {
|
||||
// Parsing produced the same result, so there was a domain name in there to begin with.
|
||||
return []string{name}
|
||||
}
|
||||
if reference.Domain(named) != "" && RegistryDefaultPathPrefix[reference.Domain(named)] != "" {
|
||||
// If this domain can cause us to insert something in the middle, check if that happened.
|
||||
repoPath := reference.Path(named)
|
||||
domain := reference.Domain(named)
|
||||
defaultPrefix := RegistryDefaultPathPrefix[reference.Domain(named)] + "/"
|
||||
if strings.HasPrefix(repoPath, defaultPrefix) && path.Join(domain, repoPath[len(defaultPrefix):]) == name {
|
||||
// Yup, parsing just inserted a bit in the middle, so there was a domain name there to begin with.
|
||||
return []string{name}
|
||||
}
|
||||
}
|
||||
|
||||
// Figure out the list of registries.
|
||||
registries, err := sysregistries.GetRegistries(sc)
|
||||
if err != nil {
|
||||
logrus.Debugf("unable to complete image name %q: %v", name, err)
|
||||
return []string{name}
|
||||
}
|
||||
if sc.DockerInsecureSkipTLSVerify {
|
||||
if unverifiedRegistries, err := sysregistries.GetInsecureRegistries(sc); err == nil {
|
||||
registries = append(registries, unverifiedRegistries...)
|
||||
}
|
||||
}
|
||||
|
||||
// Create all of the combinations. Some registries need an additional component added, so
|
||||
// use our lookaside map to keep track of them. If there are no configured registries, at
|
||||
// least return the name as it was passed to us.
|
||||
candidates := []string{}
|
||||
for _, registry := range append([]string{firstRegistry}, registries...) {
|
||||
if registry == "" {
|
||||
continue
|
||||
}
|
||||
middle := ""
|
||||
if prefix, ok := RegistryDefaultPathPrefix[registry]; ok && strings.IndexRune(name, '/') == -1 {
|
||||
middle = prefix
|
||||
}
|
||||
candidate := path.Join(registry, middle, name)
|
||||
candidates = append(candidates, candidate)
|
||||
}
|
||||
if len(candidates) == 0 {
|
||||
candidates = append(candidates, name)
|
||||
}
|
||||
return candidates
|
||||
}
|
||||
|
||||
// ExpandNames takes unqualified names, parses them as image names, and returns
|
||||
// the fully expanded result, including a tag. Names which don't include a registry
|
||||
// name will be marked for the most-preferred registry (i.e., the first one in our
|
||||
// configuration).
|
||||
func ExpandNames(names []string) ([]string, error) {
|
||||
expanded := make([]string, 0, len(names))
|
||||
for _, n := range names {
|
||||
name, err := reference.ParseNormalizedNamed(n)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing tag %q", tag)
|
||||
return nil, errors.Wrapf(err, "error parsing name %q", n)
|
||||
}
|
||||
name = reference.TagNameOnly(name)
|
||||
tag = ""
|
||||
tag := ""
|
||||
digest := ""
|
||||
if tagged, ok := name.(reference.NamedTagged); ok {
|
||||
tag = ":" + tagged.Tag()
|
||||
}
|
||||
expanded = append(expanded, name.Name()+tag)
|
||||
if digested, ok := name.(reference.Digested); ok {
|
||||
digest = "@" + digested.Digest().String()
|
||||
}
|
||||
expanded = append(expanded, name.Name()+tag+digest)
|
||||
}
|
||||
return expanded, nil
|
||||
}
|
||||
@@ -48,7 +146,7 @@ func FindImage(store storage.Store, image string) (*storage.Image, error) {
|
||||
|
||||
// AddImageNames adds the specified names to the specified image.
|
||||
func AddImageNames(store storage.Store, image *storage.Image, addNames []string) error {
|
||||
names, err := ExpandTags(addNames)
|
||||
names, err := ExpandNames(addNames)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -58,3 +156,23 @@ func AddImageNames(store storage.Store, image *storage.Image, addNames []string)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetFailureCause checks the type of the error "err" and returns a new
|
||||
// error message that reflects the reason of the failure.
|
||||
// In case err type is not a familiar one the error "defaultError" is returned.
|
||||
func GetFailureCause(err, defaultError error) error {
|
||||
switch nErr := errors.Cause(err).(type) {
|
||||
case errcode.Errors:
|
||||
return cli.NewMultiError([]error(nErr)...)
|
||||
case errcode.Error, *url.Error:
|
||||
return nErr
|
||||
default:
|
||||
// HACK: In case the error contains "not authorized" like in
|
||||
// https://github.com/containers/image/blob/master/docker/docker_image_dest.go#L193-L205
|
||||
// TODO(bshuster): change "containers/images" to return "errcode" rather than "error".
|
||||
if strings.Contains(nErr.Error(), "not authorized") {
|
||||
return fmt.Errorf("unauthorized: authentication required")
|
||||
}
|
||||
return defaultError
|
||||
}
|
||||
}
|
||||
|
||||