Update LinuxKit for merge of moby tool

- use the mkimage hashes that we had in LinuxKit, as they are more up to date than those in the moby tool.
- update docs
- move the code from moby under src/cmd/linuxkit

Signed-off-by: Justin Cormack <justin@specialbusservice.com>
Justin Cormack
2018-07-14 11:13:03 +01:00
committed by Justin Cormack
parent 021b5718f8
commit cf5cec204d
41 changed files with 18 additions and 3872 deletions

@@ -3,7 +3,7 @@ jobs:
build:
working_directory: /go/src/github.com/linuxkit/linuxkit
docker:
- image: circleci/golang:1.9-stretch
- image: circleci/golang:1.10-stretch
steps:
- checkout
- run: mkdir -p ./bin

@@ -58,8 +58,6 @@ linuxkit build linuxkit.yml
to build the example configuration. You can also specify different output formats, eg `linuxkit build -format raw-bios linuxkit.yml` to
output a raw BIOS bootable disk image, or `linuxkit build -format iso-efi linuxkit.yml` to output an EFI bootable ISO image. See `linuxkit build -help` for more information.
Since `linuxkit build` is built around the [Moby tool](https://github.com/moby/tool) the input yml files are described in the [Moby tool documentation](https://github.com/moby/tool/blob/master/docs/yaml.md).
### Booting and Testing
You can use `linuxkit run <name>` or `linuxkit run <name>.<format>` to
@@ -117,7 +115,7 @@ To customise, copy or modify the [`linuxkit.yml`](linuxkit.yml) to your own `fil
generate its specified output. You can run the output with `linuxkit run file`.
The yaml file specifies a kernel and base init system, a set of containers that are built into the generated image and started at boot time. You can specify the type
of artifact to build with the `moby` tool eg `linuxkit build -format vhd linuxkit.yml`.
of artifact to build eg `linuxkit build -format vhd linuxkit.yml`.
If you want to build your own packages, see this [document](docs/packages.md).
@@ -131,7 +129,7 @@ The yaml format specifies the image to be built:
- `services` is the system services, which normally run for the whole time the system is up
- `files` are additional files to add to the image
For a more detailed overview of the options see [yaml documentation](https://github.com/moby/tool/blob/master/docs/yaml.md)
For a more detailed overview of the options see [yaml documentation](docs/yaml.md)
## Architecture and security

@@ -1,7 +1,7 @@
## Private Images
When building, `moby` downloads, and optionally checks the notary signature, on any OCI images referenced in any section.
When building, `linuxkit build` downloads, and optionally checks the notary signature, on any OCI images referenced in any section.
As of this writing, `moby` does **not** have the ability to download these images from registries that require credentials to access. This is equally true for private images on public registries, like https://hub.docker.com, as for private registries.
As of this writing, it does **not** have the ability to download these images from registries that require credentials to access. This is equally true for private images on public registries, like https://hub.docker.com, as for private registries.
We are working on enabling private images with credentials. Until such time as that feature is added, you can follow these steps to build a moby image using OCI images
that require credentials to access:
@@ -10,4 +10,4 @@ that require credentials to access:
2. `docker pull` to download the images to your local machine where you will run `moby build`.
3. Run `moby build` (or `linuxkit build`).
Additionally, ensure that you do **not** have trust enabled for those images. See the section on [trust](#trust) in this document. Alternately, you can run `moby build` or `linuxkit build` with `--disable-trust`.
Additionally, ensure that you do **not** have trust enabled for those images. See the section on [trust](#trust) in this document. Alternately, you can run `linuxkit build` with `--disable-trust`.

@@ -13,12 +13,6 @@ Details of usage of the `vndr` tool and the format of `vendor.conf` can be found
Once done, you must run the `vndr` tool to add the necessary files to the `vendor` directory.
The easiest way to do this is in a container.
Currently if updating `github.com/moby/tool` it is also necessary to
update `src/cmd/linuxkit/build.go` manually after updating `vendor.conf`:
hash=$(awk '/^github.com\/moby\/tool/ { print $2 }' src/cmd/linuxkit/vendor.conf)
curl -fsSL -o src/cmd/linuxkit/build.go https://raw.githubusercontent.com/moby/tool/${hash}/cmd/moby/build.go
## Updating in a container
To update all dependencies:

@@ -14,7 +14,7 @@ import (
"golang.org/x/sys/unix"
)
// Note these definitions are from moby/tool/src/moby/config.go and should be kept in sync
// Note these definitions are from src/moby/config.go and should be kept in sync
// Runtime is the type of config processed at runtime, not used to build the OCI spec
type Runtime struct {

@@ -11,7 +11,7 @@ import (
"path/filepath"
"strings"
"github.com/moby/tool/src/moby"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/moby"
log "github.com/sirupsen/logrus"
)

@@ -10,7 +10,7 @@ import (
"path/filepath"
"strings"
"github.com/moby/tool/src/pad4"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/pad4"
"github.com/surma/gocpio"
)

@@ -8,34 +8,11 @@ import (
"path/filepath"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/version"
"github.com/moby/tool/src/moby"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
func init() {
// Register LinuxKit images to build outputs with the vendored moby tool.
// This allows us to overwrite the hashes locally without having
// to re-vendor the 'github.com/moby/tool' when we update 'mkimage-*'
imgs := map[string]string{
"iso-bios": "linuxkit/mkimage-iso-bios:fd0092700bc19ea36cc8dccccc9799b7847b4909",
"iso-efi": "linuxkit/mkimage-iso-efi:79148c60bbf2a9526d976d708840492d85b0c576",
"raw-bios": "linuxkit/mkimage-raw-bios:0ff04de5d11a88b0712cdc85b2ee5f0b966ffccf",
"raw-efi": "linuxkit/mkimage-raw-efi:084f159cb44dc6c22351a70f1c1a043857be4e12",
"squashfs": "linuxkit/mkimage-squashfs:36f3fa106cfb7f8b818a828d7aebb27f946c9526",
"gcp": "linuxkit/mkimage-gcp:e6cdcf859ab06134c0c37a64ed5f886ec8dae1a1",
"qcow2-efi": "linuxkit/mkimage-qcow2-efi:0eb853459785fad0b518d8edad3b7434add6ad96",
"vhd": "linuxkit/mkimage-vhd:3820219e5c350fe8ab2ec6a217272ae82f4b9242",
"dynamic-vhd": "linuxkit/mkimage-dynamic-vhd:743ac9959fe6d3912ebd78b4fd490b117c53f1a6",
"vmdk": "linuxkit/mkimage-vmdk:cee81a3ed9c44ae446ef7ebff8c42c1e77b3e1b5",
"rpi3": "linuxkit/mkimage-rpi3:be740259f3b49bfe46f5322e22709c3af2111b33",
}
if err := moby.UpdateOutputImages(imgs); err != nil {
log.Fatalf("Failed to register mkimage-*. %v", err)
}
}
// GlobalConfig is the global tool configuration
type GlobalConfig struct {
Pkg PkgConfig `yaml:"pkg"`

@@ -10,23 +10,23 @@ import (
"runtime"
"strings"
"github.com/moby/tool/src/initrd"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/initrd"
log "github.com/sirupsen/logrus"
)
var (
outputImages = map[string]string{
"iso-bios": "linuxkit/mkimage-iso-bios:9a51dc64a461f1cc50ba05f30a38f73f5227ac03",
"iso-efi": "linuxkit/mkimage-iso-efi:343cf1a8ac0aba7d8a1f13b7f45fa0b57ab897dc",
"raw-bios": "linuxkit/mkimage-raw-bios:d90713b2dd610cf9a0f5f9d9095f8bf86f40d5c6",
"raw-efi": "linuxkit/mkimage-raw-efi:8938ffb6014543e557b624a40cce1714f30ce4b6",
"squashfs": "linuxkit/mkimage-squashfs:b44d00b0a336fd32c122ff32bd2b39c36a965135",
"iso-bios": "linuxkit/mkimage-iso-bios:fd0092700bc19ea36cc8dccccc9799b7847b4909",
"iso-efi": "linuxkit/mkimage-iso-efi:79148c60bbf2a9526d976d708840492d85b0c576",
"raw-bios": "linuxkit/mkimage-raw-bios:0ff04de5d11a88b0712cdc85b2ee5f0b966ffccf",
"raw-efi": "linuxkit/mkimage-raw-efi:084f159cb44dc6c22351a70f1c1a043857be4e12",
"squashfs": "linuxkit/mkimage-squashfs:36f3fa106cfb7f8b818a828d7aebb27f946c9526",
"gcp": "linuxkit/mkimage-gcp:e6cdcf859ab06134c0c37a64ed5f886ec8dae1a1",
"qcow2-efi": "linuxkit/mkimage-qcow2-efi:787b54906e14a56b9f1da35dcc8e46bd58435285",
"qcow2-efi": "linuxkit/mkimage-qcow2-efi:0eb853459785fad0b518d8edad3b7434add6ad96",
"vhd": "linuxkit/mkimage-vhd:3820219e5c350fe8ab2ec6a217272ae82f4b9242",
"dynamic-vhd": "linuxkit/mkimage-dynamic-vhd:743ac9959fe6d3912ebd78b4fd490b117c53f1a6",
"vmdk": "linuxkit/mkimage-vmdk:cee81a3ed9c44ae446ef7ebff8c42c1e77b3e1b5",
"rpi3": "linuxkit/mkimage-rpi3:0f23c4f37cdca99281ca33ac6188e1942fa7a2b8",
"rpi3": "linuxkit/mkimage-rpi3:be740259f3b49bfe46f5322e22709c3af2111b33",
}
)

@@ -9,7 +9,7 @@ import (
"path/filepath"
"strings"
"github.com/moby/tool/src/moby"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/moby"
)
// Contains fields settable in the build.yml

@@ -24,9 +24,6 @@ github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/mitchellh/go-ps 4fdf99ab29366514c69ccccddab5dc58b8d84062
github.com/moby/datakit 97b3d230535397a813323902c23751e176481a86
github.com/moby/hyperkit d65b09c1c28a2bfb6a976c86ecd885d2ee4c71d3
# When updating also:
# curl -fsSL -o src/cmd/linuxkit/build.go https://raw.githubusercontent.com/moby/tool/«hash»/cmd/moby/build.go
github.com/moby/tool 3dbad3b7daffd631d036493a1e883608206d2e03
github.com/moby/vpnkit 0e4293bb1058598c4b0a406ed171f52573ef414c
github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
github.com/opencontainers/image-spec v1.0.0

@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015-2017 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,13 +0,0 @@
Copyright 2015-2017 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,14 +0,0 @@
# Moby
[Moby Project](https://mobyproject.org)
The Moby Project is an open framework created by Docker to assemble specialized container systems without reinventing the wheel.
At the core of Moby is a framework which provides a “lego set” of dozens of standard components and tools for assembling them into custom platforms.
For more information, please visit the [Moby Project home page](https://mobyproject.org).
## Documentation
* [Format of moby input yml](./docs/yaml.md)
* [Using private images](./docs/privateimages.md)

@@ -1,196 +0,0 @@
package initrd
import (
"archive/tar"
"bytes"
"compress/gzip"
"errors"
"io"
"io/ioutil"
"path/filepath"
"strings"
"github.com/moby/tool/src/pad4"
"github.com/surma/gocpio"
)
// Writer is an io.WriteCloser that writes to an initrd
// This is a compressed cpio archive, zero padded to 4 bytes
type Writer struct {
pw *pad4.Writer
gw *gzip.Writer
cw *cpio.Writer
}
func typeconv(thdr *tar.Header) int64 {
switch thdr.Typeflag {
case tar.TypeReg:
return cpio.TYPE_REG
case tar.TypeRegA:
return cpio.TYPE_REG
// Currently hard links not supported very well :)
// Convert to relative symlink as absolute will not work in container
// cpio does support hardlinks but file contents still duplicated, so rely
// on compression to fix that which is fairly ugly. Symlink has not caused issues.
case tar.TypeLink:
dir := filepath.Dir(thdr.Name)
rel, err := filepath.Rel(dir, thdr.Linkname)
if err != nil {
// should never happen, but leave as full abs path
rel = "/" + thdr.Linkname
}
thdr.Linkname = rel
return cpio.TYPE_SYMLINK
case tar.TypeSymlink:
return cpio.TYPE_SYMLINK
case tar.TypeChar:
return cpio.TYPE_CHAR
case tar.TypeBlock:
return cpio.TYPE_BLK
case tar.TypeDir:
return cpio.TYPE_DIR
case tar.TypeFifo:
return cpio.TYPE_FIFO
default:
return -1
}
}
func copyTarEntry(w *Writer, thdr *tar.Header, r io.Reader) (written int64, err error) {
tp := typeconv(thdr)
if tp == -1 {
return written, errors.New("cannot convert tar file")
}
size := thdr.Size
if tp == cpio.TYPE_SYMLINK {
size = int64(len(thdr.Linkname))
}
chdr := cpio.Header{
Mode: thdr.Mode,
Uid: thdr.Uid,
Gid: thdr.Gid,
Mtime: thdr.ModTime.Unix(),
Size: size,
Devmajor: thdr.Devmajor,
Devminor: thdr.Devminor,
Type: tp,
Name: thdr.Name,
}
err = w.WriteHeader(&chdr)
if err != nil {
return
}
var n int64
switch tp {
case cpio.TYPE_SYMLINK:
buffer := bytes.NewBufferString(thdr.Linkname)
n, err = io.Copy(w, buffer)
case cpio.TYPE_REG:
n, err = io.Copy(w, r)
}
written += n
return
}
// CopyTar copies a tar stream into an initrd
func CopyTar(w *Writer, r *tar.Reader) (written int64, err error) {
for {
var thdr *tar.Header
thdr, err = r.Next()
if err == io.EOF {
return written, nil
}
if err != nil {
return
}
written, err = copyTarEntry(w, thdr, r)
if err != nil {
return
}
}
}
// CopySplitTar copies a tar stream into an initrd, but splits out kernel, cmdline, and ucode
func CopySplitTar(w *Writer, r *tar.Reader) (kernel []byte, cmdline string, ucode []byte, err error) {
for {
var thdr *tar.Header
thdr, err = r.Next()
if err == io.EOF {
return kernel, cmdline, ucode, nil
}
if err != nil {
return
}
switch {
case thdr.Name == "boot/kernel":
kernel, err = ioutil.ReadAll(r)
if err != nil {
return
}
case thdr.Name == "boot/cmdline":
var buf []byte
buf, err = ioutil.ReadAll(r)
if err != nil {
return
}
cmdline = string(buf)
case thdr.Name == "boot/ucode.cpio":
ucode, err = ioutil.ReadAll(r)
if err != nil {
return
}
case strings.HasPrefix(thdr.Name, "boot/"):
// skip the rest of ./boot
default:
_, err = copyTarEntry(w, thdr, r)
if err != nil {
return
}
}
}
}
// NewWriter creates a writer that will output an initrd stream
func NewWriter(w io.Writer) *Writer {
initrd := new(Writer)
initrd.pw = pad4.NewWriter(w)
initrd.gw = gzip.NewWriter(initrd.pw)
initrd.cw = cpio.NewWriter(initrd.gw)
return initrd
}
// WriteHeader writes a cpio header into an initrd
func (w *Writer) WriteHeader(hdr *cpio.Header) error {
return w.cw.WriteHeader(hdr)
}
// Write writes a cpio file into an initrd
func (w *Writer) Write(b []byte) (n int, e error) {
return w.cw.Write(b)
}
// Close closes the writer
func (w *Writer) Close() error {
err1 := w.cw.Close()
err2 := w.gw.Close()
err3 := w.pw.Close()
if err1 != nil {
return err1
}
if err2 != nil {
return err2
}
if err3 != nil {
return err3
}
return nil
}
// Copy reads a tarball in a stream and outputs a compressed init ram disk
func Copy(w *Writer, r io.Reader) (int64, error) {
tr := tar.NewReader(r)
return CopyTar(w, tr)
}
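
The initrd package above layers a cpio writer on top of gzip compression and 4-byte padding. For reference, a minimal sketch of driving it to turn a tar stream into an initrd, assuming the post-move import path under `src/cmd/linuxkit` (illustrative only, not part of this commit):

```go
package main

import (
	"log"
	"os"

	"github.com/linuxkit/linuxkit/src/cmd/linuxkit/initrd"
)

func main() {
	// Stream a tar archive from stdin into a gzip-compressed,
	// 4-byte padded cpio archive (an initrd) on stdout.
	w := initrd.NewWriter(os.Stdout)
	if _, err := initrd.Copy(w, os.Stdin); err != nil {
		log.Fatalf("copy tar to initrd: %v", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("close initrd: %v", err)
	}
}
```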

@@ -1,591 +0,0 @@
package moby
import (
"archive/tar"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
var streamable = map[string]bool{
"docker": true,
"tar": true,
}
// Streamable returns true if an output can be streamed
func Streamable(t string) bool {
return streamable[t]
}
type addFun func(*tar.Writer) error
const dockerfile = `
FROM scratch
COPY . ./
ENTRYPOINT ["/bin/rc.init"]
`
// For now this is a constant that we use in init section only to make
// resolv.conf point at somewhere writeable. In future when we are not using
// Docker to extract images we can read this directly from image, but now Docker
// will overwrite anything we put in the image.
const resolvconfSymlink = "/run/resolvconf/resolv.conf"
var additions = map[string]addFun{
"docker": func(tw *tar.Writer) error {
log.Infof(" Adding Dockerfile")
hdr := &tar.Header{
Name: "Dockerfile",
Mode: 0644,
Size: int64(len(dockerfile)),
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
if _, err := tw.Write([]byte(dockerfile)); err != nil {
return err
}
return nil
},
}
// OutputTypes returns a list of the valid output types
func OutputTypes() []string {
ts := []string{}
for k := range streamable {
ts = append(ts, k)
}
for k := range outFuns {
ts = append(ts, k)
}
sort.Strings(ts)
return ts
}
func enforceContentTrust(fullImageName string, config *TrustConfig) bool {
for _, img := range config.Image {
// First check for an exact name match
if img == fullImageName {
return true
}
// Also check for an image name only match
// by removing a possible tag (with possibly added digest):
imgAndTag := strings.Split(fullImageName, ":")
if len(imgAndTag) >= 2 && img == imgAndTag[0] {
return true
}
// and by removing a possible digest:
imgAndDigest := strings.Split(fullImageName, "@sha256:")
if len(imgAndDigest) >= 2 && img == imgAndDigest[0] {
return true
}
}
for _, org := range config.Org {
var imgOrg string
splitName := strings.Split(fullImageName, "/")
switch len(splitName) {
case 0:
// if the image is empty, return false
return false
case 1:
// for single names like nginx, use library
imgOrg = "library"
case 2:
// for names that assume docker hub, like linuxkit/alpine, take the first split
imgOrg = splitName[0]
default:
// for names that include the registry, the second piece is the org, ex: docker.io/library/alpine
imgOrg = splitName[1]
}
if imgOrg == org {
return true
}
}
return false
}
func outputImage(image *Image, section string, prefix string, m Moby, idMap map[string]uint32, dupMap map[string]string, pull bool, iw *tar.Writer) error {
log.Infof(" Create OCI config for %s", image.Image)
useTrust := enforceContentTrust(image.Image, &m.Trust)
oci, runtime, err := ConfigToOCI(image, useTrust, idMap)
if err != nil {
return fmt.Errorf("Failed to create OCI spec for %s: %v", image.Image, err)
}
config, err := json.MarshalIndent(oci, "", " ")
if err != nil {
return fmt.Errorf("Failed to create config for %s: %v", image.Image, err)
}
path := path.Join("containers", section, prefix+image.Name)
readonly := oci.Root.Readonly
err = ImageBundle(path, image.ref, config, runtime, iw, useTrust, pull, readonly, dupMap)
if err != nil {
return fmt.Errorf("Failed to extract root filesystem for %s: %v", image.Image, err)
}
return nil
}
// Build performs the actual build process
func Build(m Moby, w io.Writer, pull bool, tp string) error {
if MobyDir == "" {
MobyDir = defaultMobyConfigDir()
}
// create tmp dir in case needed
if err := os.MkdirAll(filepath.Join(MobyDir, "tmp"), 0755); err != nil {
return err
}
iw := tar.NewWriter(w)
// add additions
addition := additions[tp]
// allocate each container a uid, gid that can be referenced by name
idMap := map[string]uint32{}
id := uint32(100)
for _, image := range m.Onboot {
idMap[image.Name] = id
id++
}
for _, image := range m.Onshutdown {
idMap[image.Name] = id
id++
}
for _, image := range m.Services {
idMap[image.Name] = id
id++
}
// deduplicate containers with the same image
dupMap := map[string]string{}
if m.Kernel.ref != nil {
// get kernel and initrd tarball and ucode cpio archive from container
log.Infof("Extract kernel image: %s", m.Kernel.ref)
kf := newKernelFilter(iw, m.Kernel.Cmdline, m.Kernel.Binary, m.Kernel.Tar, m.Kernel.UCode)
err := ImageTar(m.Kernel.ref, "", kf, enforceContentTrust(m.Kernel.ref.String(), &m.Trust), pull, "")
if err != nil {
return fmt.Errorf("Failed to extract kernel image and tarball: %v", err)
}
err = kf.Close()
if err != nil {
return fmt.Errorf("Close error: %v", err)
}
}
// convert init images to tarballs
if len(m.Init) != 0 {
log.Infof("Add init containers:")
}
for _, ii := range m.initRefs {
log.Infof("Process init image: %s", ii)
err := ImageTar(ii, "", iw, enforceContentTrust(ii.String(), &m.Trust), pull, resolvconfSymlink)
if err != nil {
return fmt.Errorf("Failed to build init tarball from %s: %v", ii, err)
}
}
if len(m.Onboot) != 0 {
log.Infof("Add onboot containers:")
}
for i, image := range m.Onboot {
so := fmt.Sprintf("%03d", i)
if err := outputImage(image, "onboot", so+"-", m, idMap, dupMap, pull, iw); err != nil {
return err
}
}
if len(m.Onshutdown) != 0 {
log.Infof("Add onshutdown containers:")
}
for i, image := range m.Onshutdown {
so := fmt.Sprintf("%03d", i)
if err := outputImage(image, "onshutdown", so+"-", m, idMap, dupMap, pull, iw); err != nil {
return err
}
}
if len(m.Services) != 0 {
log.Infof("Add service containers:")
}
for _, image := range m.Services {
if err := outputImage(image, "services", "", m, idMap, dupMap, pull, iw); err != nil {
return err
}
}
// add files
err := filesystem(m, iw, idMap)
if err != nil {
return fmt.Errorf("failed to add filesystem parts: %v", err)
}
// add anything additional for this output type
if addition != nil {
err = addition(iw)
if err != nil {
return fmt.Errorf("Failed to add additional files: %v", err)
}
}
err = iw.Close()
if err != nil {
return fmt.Errorf("initrd close error: %v", err)
}
return nil
}
// kernelFilter is a tar.Writer that transforms a kernel image into the output we want on underlying tar writer
type kernelFilter struct {
tw *tar.Writer
buffer *bytes.Buffer
cmdline string
kernel string
tar string
ucode string
discard bool
foundKernel bool
foundKTar bool
foundUCode bool
}
func newKernelFilter(tw *tar.Writer, cmdline string, kernel string, tar, ucode *string) *kernelFilter {
tarName, kernelName, ucodeName := "kernel.tar", "kernel", ""
if tar != nil {
tarName = *tar
if tarName == "none" {
tarName = ""
}
}
if kernel != "" {
kernelName = kernel
}
if ucode != nil {
ucodeName = *ucode
}
return &kernelFilter{tw: tw, cmdline: cmdline, kernel: kernelName, tar: tarName, ucode: ucodeName}
}
func (k *kernelFilter) finishTar() error {
if k.buffer == nil {
return nil
}
tr := tar.NewReader(k.buffer)
err := tarAppend(k.tw, tr)
k.buffer = nil
return err
}
func (k *kernelFilter) Close() error {
if !k.foundKernel {
return errors.New("did not find kernel in kernel image")
}
if !k.foundKTar && k.tar != "" {
return errors.New("did not find kernel tar in kernel image")
}
return k.finishTar()
}
func (k *kernelFilter) Flush() error {
err := k.finishTar()
if err != nil {
return err
}
return k.tw.Flush()
}
func (k *kernelFilter) Write(b []byte) (n int, err error) {
if k.discard {
return len(b), nil
}
if k.buffer != nil {
return k.buffer.Write(b)
}
return k.tw.Write(b)
}
func (k *kernelFilter) WriteHeader(hdr *tar.Header) error {
err := k.finishTar()
if err != nil {
return err
}
tw := k.tw
switch hdr.Name {
case k.kernel:
if k.foundKernel {
return errors.New("found more than one possible kernel image")
}
k.foundKernel = true
k.discard = false
// If we handled the ucode, /boot already exist.
if !k.foundUCode {
whdr := &tar.Header{
Name: "boot",
Mode: 0755,
Typeflag: tar.TypeDir,
}
if err := tw.WriteHeader(whdr); err != nil {
return err
}
}
// add the cmdline in /boot/cmdline
whdr := &tar.Header{
Name: "boot/cmdline",
Mode: 0644,
Size: int64(len(k.cmdline)),
}
if err := tw.WriteHeader(whdr); err != nil {
return err
}
buf := bytes.NewBufferString(k.cmdline)
_, err = io.Copy(tw, buf)
if err != nil {
return err
}
whdr = &tar.Header{
Name: "boot/kernel",
Mode: hdr.Mode,
Size: hdr.Size,
}
if err := tw.WriteHeader(whdr); err != nil {
return err
}
case k.tar:
k.foundKTar = true
k.discard = false
k.buffer = new(bytes.Buffer)
case k.ucode:
k.foundUCode = true
k.discard = false
// If we handled the kernel, /boot already exist.
if !k.foundKernel {
whdr := &tar.Header{
Name: "boot",
Mode: 0755,
Typeflag: tar.TypeDir,
}
if err := tw.WriteHeader(whdr); err != nil {
return err
}
}
whdr := &tar.Header{
Name: "boot/ucode.cpio",
Mode: hdr.Mode,
Size: hdr.Size,
}
if err := tw.WriteHeader(whdr); err != nil {
return err
}
default:
k.discard = true
}
return nil
}
func tarAppend(iw *tar.Writer, tr *tar.Reader) error {
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
err = iw.WriteHeader(hdr)
if err != nil {
return err
}
_, err = io.Copy(iw, tr)
if err != nil {
return err
}
}
return nil
}
// this allows inserting metadata into a file in the image
func metadata(m Moby, md string) ([]byte, error) {
// Make sure the Image strings are update to date with the refs
updateImages(&m)
switch md {
case "json":
return json.MarshalIndent(m, "", " ")
case "yaml":
return yaml.Marshal(m)
default:
return []byte{}, fmt.Errorf("Unsupported metadata type: %s", md)
}
}
func filesystem(m Moby, tw *tar.Writer, idMap map[string]uint32) error {
// TODO also include the files added in other parts of the build
var addedFiles = map[string]bool{}
if len(m.Files) != 0 {
log.Infof("Add files:")
}
for _, f := range m.Files {
log.Infof(" %s", f.Path)
if f.Path == "" {
return errors.New("Did not specify path for file")
}
// tar archives should not have absolute paths
if f.Path[0] == os.PathSeparator {
f.Path = f.Path[1:]
}
mode := int64(0600)
if f.Directory {
mode = 0700
}
if f.Mode != "" {
var err error
mode, err = strconv.ParseInt(f.Mode, 8, 32)
if err != nil {
return fmt.Errorf("Cannot parse file mode as octal value: %v", err)
}
}
dirMode := mode
if dirMode&0700 != 0 {
dirMode |= 0100
}
if dirMode&0070 != 0 {
dirMode |= 0010
}
if dirMode&0007 != 0 {
dirMode |= 0001
}
uid, err := idNumeric(f.UID, idMap)
if err != nil {
return err
}
gid, err := idNumeric(f.GID, idMap)
if err != nil {
return err
}
var contents []byte
if f.Contents != nil {
contents = []byte(*f.Contents)
}
if !f.Directory && f.Symlink == "" && f.Contents == nil {
if f.Source == "" && f.Metadata == "" {
return fmt.Errorf("Contents of file (%s) not specified", f.Path)
}
if f.Source != "" && f.Metadata != "" {
return fmt.Errorf("Specified Source and Metadata for file: %s", f.Path)
}
if f.Source != "" {
source := f.Source
if len(source) > 2 && source[:2] == "~/" {
source = homeDir() + source[1:]
}
if f.Optional {
_, err := os.Stat(source)
if err != nil {
// skip if not found or readable
log.Debugf("Skipping file [%s] as not readable and marked optional", source)
continue
}
}
var err error
contents, err = ioutil.ReadFile(source)
if err != nil {
return err
}
} else {
contents, err = metadata(m, f.Metadata)
if err != nil {
return err
}
}
} else {
if f.Metadata != "" {
return fmt.Errorf("Specified Contents and Metadata for file: %s", f.Path)
}
if f.Source != "" {
return fmt.Errorf("Specified Contents and Source for file: %s", f.Path)
}
}
// we need all the leading directories
parts := strings.Split(path.Dir(f.Path), "/")
root := ""
for _, p := range parts {
if p == "." || p == "/" {
continue
}
if root == "" {
root = p
} else {
root = root + "/" + p
}
if !addedFiles[root] {
hdr := &tar.Header{
Name: root,
Typeflag: tar.TypeDir,
Mode: dirMode,
Uid: int(uid),
Gid: int(gid),
}
err := tw.WriteHeader(hdr)
if err != nil {
return err
}
addedFiles[root] = true
}
}
addedFiles[f.Path] = true
hdr := &tar.Header{
Name: f.Path,
Mode: mode,
Uid: int(uid),
Gid: int(gid),
}
if f.Directory {
if f.Contents != nil {
return errors.New("Directory with contents not allowed")
}
hdr.Typeflag = tar.TypeDir
err := tw.WriteHeader(hdr)
if err != nil {
return err
}
} else if f.Symlink != "" {
hdr.Typeflag = tar.TypeSymlink
hdr.Linkname = f.Symlink
err := tw.WriteHeader(hdr)
if err != nil {
return err
}
} else {
hdr.Size = int64(len(contents))
err := tw.WriteHeader(hdr)
if err != nil {
return err
}
_, err = tw.Write(contents)
if err != nil {
return err
}
}
}
return nil
}
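
Build is the heart of the tool: it assembles the kernel image, init containers, onboot/onshutdown/services containers and extra files into one tarball. A hedged sketch of calling it directly, assuming the in-tree import path and a `moby.NewConfig` yaml parser from the tool's config.go (that file is not shown in this diff, so treat the helper as an assumption):

```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/linuxkit/linuxkit/src/cmd/linuxkit/moby"
)

func main() {
	data, err := ioutil.ReadFile("linuxkit.yml")
	if err != nil {
		log.Fatal(err)
	}
	// NewConfig (assumed, from config.go) unmarshals and validates the yaml.
	m, err := moby.NewConfig(data)
	if err != nil {
		log.Fatal(err)
	}
	out, err := os.Create("linuxkit.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	// "tar" is one of the streamable output types; Build writes the
	// filesystem tarball (kernel, init, containers, files) to out,
	// pulling images as needed.
	if err := moby.Build(m, out, true, "tar"); err != nil {
		log.Fatal(err)
	}
}
```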

@@ -1,304 +0,0 @@
package moby
import (
"archive/tar"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"path"
"strings"
"github.com/containerd/containerd/reference"
"github.com/opencontainers/runtime-spec/specs-go"
log "github.com/sirupsen/logrus"
)
type tarWriter interface {
Close() error
Flush() error
Write(b []byte) (n int, err error)
WriteHeader(hdr *tar.Header) error
}
// This uses Docker to convert a Docker image into a tarball. It would be an improvement if we
// used the containerd libraries to do this instead locally direct from a local image
// cache as it would be much simpler.
// Unfortunately there are some files that Docker always makes appear in a running image and
// export shows them. In particular we have no way for a user to specify their own resolv.conf.
// Even if we were not using docker export to get the image, users of docker build cannot override
// the resolv.conf either, as it is not writeable and bind mounted in.
var exclude = map[string]bool{
".dockerenv": true,
"Dockerfile": true,
"dev/console": true,
"dev/pts": true,
"dev/shm": true,
"etc/hostname": true,
}
var replace = map[string]string{
"etc/hosts": `127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
`,
"etc/resolv.conf": `
# no resolv.conf configured
`,
}
// tarPrefix creates the leading directories for a path
func tarPrefix(path string, tw tarWriter) error {
if path == "" {
return nil
}
if path[len(path)-1] != byte('/') {
return fmt.Errorf("path does not end with /: %s", path)
}
path = path[:len(path)-1]
if path[0] == byte('/') {
return fmt.Errorf("path should be relative: %s", path)
}
mkdir := ""
for _, dir := range strings.Split(path, "/") {
mkdir = mkdir + dir
hdr := &tar.Header{
Name: mkdir,
Mode: 0755,
Typeflag: tar.TypeDir,
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
mkdir = mkdir + "/"
}
return nil
}
// ImageTar takes a Docker image and outputs it to a tar stream
func ImageTar(ref *reference.Spec, prefix string, tw tarWriter, trust bool, pull bool, resolv string) (e error) {
log.Debugf("image tar: %s %s", ref, prefix)
if prefix != "" && prefix[len(prefix)-1] != byte('/') {
return fmt.Errorf("prefix does not end with /: %s", prefix)
}
err := tarPrefix(prefix, tw)
if err != nil {
return err
}
if pull || trust {
err := dockerPull(ref, pull, trust)
if err != nil {
return fmt.Errorf("Could not pull image %s: %v", ref, err)
}
}
container, err := dockerCreate(ref.String())
if err != nil {
// if the image wasn't found, pull it down. Bail on other errors.
if strings.Contains(err.Error(), "No such image") {
err := dockerPull(ref, true, trust)
if err != nil {
return fmt.Errorf("Could not pull image %s: %v", ref, err)
}
container, err = dockerCreate(ref.String())
if err != nil {
return fmt.Errorf("Failed to docker create image %s: %v", ref, err)
}
} else {
return fmt.Errorf("Failed to create docker image %s: %v", ref, err)
}
}
contents, err := dockerExport(container)
if err != nil {
return fmt.Errorf("Failed to docker export container from container %s: %v", container, err)
}
defer func() {
contents.Close()
if err := dockerRm(container); e == nil && err != nil {
e = fmt.Errorf("Failed to docker rm container %s: %v", container, err)
}
}()
// now we need to filter out some files from the resulting tar archive
tr := tar.NewReader(contents)
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
if exclude[hdr.Name] {
log.Debugf("image tar: %s %s exclude %s", ref, prefix, hdr.Name)
_, err = io.Copy(ioutil.Discard, tr)
if err != nil {
return err
}
} else if replace[hdr.Name] != "" {
if hdr.Name != "etc/resolv.conf" || resolv == "" {
contents := replace[hdr.Name]
hdr.Size = int64(len(contents))
hdr.Name = prefix + hdr.Name
log.Debugf("image tar: %s %s add %s", ref, prefix, hdr.Name)
if err := tw.WriteHeader(hdr); err != nil {
return err
}
buf := bytes.NewBufferString(contents)
_, err = io.Copy(tw, buf)
if err != nil {
return err
}
} else {
// replace resolv.conf with specified symlink
hdr.Name = prefix + hdr.Name
hdr.Size = 0
hdr.Typeflag = tar.TypeSymlink
hdr.Linkname = resolv
log.Debugf("image tar: %s %s add resolv symlink /etc/resolv.conf -> %s", ref, prefix, resolv)
if err := tw.WriteHeader(hdr); err != nil {
return err
}
}
_, err = io.Copy(ioutil.Discard, tr)
if err != nil {
return err
}
} else {
log.Debugf("image tar: %s %s add %s", ref, prefix, hdr.Name)
hdr.Name = prefix + hdr.Name
if hdr.Typeflag == tar.TypeLink {
// hard links are referenced by full path so need to be adjusted
hdr.Linkname = prefix + hdr.Linkname
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
_, err = io.Copy(tw, tr)
if err != nil {
return err
}
}
}
return nil
}
// ImageBundle produces an OCI bundle at the given path in a tarball, given an image and a config.json
func ImageBundle(prefix string, ref *reference.Spec, config []byte, runtime Runtime, tw tarWriter, trust bool, pull bool, readonly bool, dupMap map[string]string) error { // nolint: lll
// if read only, just unpack in rootfs/ but otherwise set up for overlay
rootExtract := "rootfs"
if !readonly {
rootExtract = "lower"
}
// See if we have extracted this image previously
root := path.Join(prefix, rootExtract)
var foundElsewhere = dupMap[ref.String()] != ""
if !foundElsewhere {
if err := ImageTar(ref, root+"/", tw, trust, pull, ""); err != nil {
return err
}
dupMap[ref.String()] = root
} else {
if err := tarPrefix(prefix+"/", tw); err != nil {
return err
}
root = dupMap[ref.String()]
}
hdr := &tar.Header{
Name: path.Join(prefix, "config.json"),
Mode: 0644,
Size: int64(len(config)),
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
buf := bytes.NewBuffer(config)
if _, err := io.Copy(tw, buf); err != nil {
return err
}
var rootfsMounts []specs.Mount
if !readonly {
// add a tmp directory to be used as a mount point for tmpfs for upper, work
tmp := path.Join(prefix, "tmp")
hdr = &tar.Header{
Name: tmp,
Mode: 0755,
Typeflag: tar.TypeDir,
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
// add rootfs as merged mount point
hdr = &tar.Header{
Name: path.Join(prefix, "rootfs"),
Mode: 0755,
Typeflag: tar.TypeDir,
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
overlayOptions := []string{"lowerdir=/" + root, "upperdir=/" + path.Join(tmp, "upper"), "workdir=/" + path.Join(tmp, "work")}
rootfsMounts = []specs.Mount{
{Source: "tmpfs", Type: "tmpfs", Destination: "/" + tmp},
// remount private as nothing else should see the temporary layers
{Destination: "/" + tmp, Options: []string{"remount", "private"}},
{Source: "overlay", Type: "overlay", Destination: "/" + path.Join(prefix, "rootfs"), Options: overlayOptions},
}
} else {
if foundElsewhere {
// we need to make the mountpoint at rootfs
hdr = &tar.Header{
Name: path.Join(prefix, "rootfs"),
Mode: 0755,
Typeflag: tar.TypeDir,
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
}
// either bind from another location, or bind from self to make sure it is a mountpoint as runc prefers this
rootfsMounts = []specs.Mount{
{Source: "/" + root, Destination: "/" + path.Join(prefix, "rootfs"), Options: []string{"bind"}},
}
}
// Prepend the rootfs onto the user specified mounts.
runtimeMounts := append(rootfsMounts, *runtime.Mounts...)
runtime.Mounts = &runtimeMounts
// write the runtime config
runtimeConfig, err := json.MarshalIndent(runtime, "", " ")
if err != nil {
return fmt.Errorf("Failed to create runtime config for %s: %v", ref, err)
}
hdr = &tar.Header{
Name: path.Join(prefix, "runtime.json"),
Mode: 0644,
Size: int64(len(runtimeConfig)),
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
buf = bytes.NewBuffer(runtimeConfig)
if _, err := io.Copy(tw, buf); err != nil {
return err
}
log.Debugf("image bundle: %s %s cfg: %s runtime: %s", prefix, ref, string(config), string(runtimeConfig))
return nil
}
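
ImageTar and ImageBundle are the primitives Build uses to turn each referenced image into tar entries. A small sketch of exporting a single image's root filesystem, again assuming the in-tree import path; the image name is only an example:

```go
package main

import (
	"archive/tar"
	"log"
	"os"

	"github.com/containerd/containerd/reference"
	"github.com/linuxkit/linuxkit/src/cmd/linuxkit/moby"
)

func main() {
	// Parse the image name into the reference.Spec that ImageTar expects.
	ref, err := reference.Parse("linuxkit/init:latest") // example image
	if err != nil {
		log.Fatal(err)
	}
	tw := tar.NewWriter(os.Stdout)
	// Export the image's root filesystem under the "init/" prefix,
	// without content trust, pulling the image if it is missing locally.
	// An empty resolv argument writes the stock replacement resolv.conf
	// rather than a symlink.
	if err := moby.ImageTar(&ref, "init/", tw, false, true, ""); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
}
```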

@@ -1,492 +0,0 @@
package moby
import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"strings"
"github.com/moby/tool/src/initrd"
log "github.com/sirupsen/logrus"
)
var (
outputImages = map[string]string{
"iso-bios": "linuxkit/mkimage-iso-bios:9a51dc64a461f1cc50ba05f30a38f73f5227ac03",
"iso-efi": "linuxkit/mkimage-iso-efi:343cf1a8ac0aba7d8a1f13b7f45fa0b57ab897dc",
"raw-bios": "linuxkit/mkimage-raw-bios:d90713b2dd610cf9a0f5f9d9095f8bf86f40d5c6",
"raw-efi": "linuxkit/mkimage-raw-efi:8938ffb6014543e557b624a40cce1714f30ce4b6",
"squashfs": "linuxkit/mkimage-squashfs:b44d00b0a336fd32c122ff32bd2b39c36a965135",
"gcp": "linuxkit/mkimage-gcp:e6cdcf859ab06134c0c37a64ed5f886ec8dae1a1",
"qcow2-efi": "linuxkit/mkimage-qcow2-efi:787b54906e14a56b9f1da35dcc8e46bd58435285",
"vhd": "linuxkit/mkimage-vhd:3820219e5c350fe8ab2ec6a217272ae82f4b9242",
"dynamic-vhd": "linuxkit/mkimage-dynamic-vhd:743ac9959fe6d3912ebd78b4fd490b117c53f1a6",
"vmdk": "linuxkit/mkimage-vmdk:cee81a3ed9c44ae446ef7ebff8c42c1e77b3e1b5",
"rpi3": "linuxkit/mkimage-rpi3:0f23c4f37cdca99281ca33ac6188e1942fa7a2b8",
}
)
// UpdateOutputImages overwrite the docker images used to build the outputs
// 'update' is a map where the key is the output format and the value is a LinuxKit 'mkimage' image.
func UpdateOutputImages(update map[string]string) error {
for k, img := range update {
if _, ok := outputImages[k]; !ok {
return fmt.Errorf("Image format %s is not known", k)
}
outputImages[k] = img
}
return nil
}
var outFuns = map[string]func(string, io.Reader, int) error{
"kernel+initrd": func(base string, image io.Reader, size int) error {
kernel, initrd, cmdline, ucode, err := tarToInitrd(image)
if err != nil {
return fmt.Errorf("Error converting to initrd: %v", err)
}
err = outputKernelInitrd(base, kernel, initrd, cmdline, ucode)
if err != nil {
return fmt.Errorf("Error writing kernel+initrd output: %v", err)
}
return nil
},
"tar-kernel-initrd": func(base string, image io.Reader, size int) error {
kernel, initrd, cmdline, ucode, err := tarToInitrd(image)
if err != nil {
return fmt.Errorf("Error converting to initrd: %v", err)
}
if err := outputKernelInitrdTarball(base, kernel, initrd, cmdline, ucode); err != nil {
return fmt.Errorf("Error writing kernel+initrd tarball output: %v", err)
}
return nil
},
"iso-bios": func(base string, image io.Reader, size int) error {
err := outputIso(outputImages["iso-bios"], base+".iso", image)
if err != nil {
return fmt.Errorf("Error writing iso-bios output: %v", err)
}
return nil
},
"iso-efi": func(base string, image io.Reader, size int) error {
err := outputIso(outputImages["iso-efi"], base+"-efi.iso", image)
if err != nil {
return fmt.Errorf("Error writing iso-efi output: %v", err)
}
return nil
},
"raw-bios": func(base string, image io.Reader, size int) error {
kernel, initrd, cmdline, _, err := tarToInitrd(image)
if err != nil {
return fmt.Errorf("Error converting to initrd: %v", err)
}
// TODO: Handle ucode
err = outputImg(outputImages["raw-bios"], base+"-bios.img", kernel, initrd, cmdline)
if err != nil {
return fmt.Errorf("Error writing raw-bios output: %v", err)
}
return nil
},
"raw-efi": func(base string, image io.Reader, size int) error {
kernel, initrd, cmdline, _, err := tarToInitrd(image)
if err != nil {
return fmt.Errorf("Error converting to initrd: %v", err)
}
err = outputImg(outputImages["raw-efi"], base+"-efi.img", kernel, initrd, cmdline)
if err != nil {
return fmt.Errorf("Error writing raw-efi output: %v", err)
}
return nil
},
"kernel+squashfs": func(base string, image io.Reader, size int) error {
err := outputKernelSquashFS(outputImages["squashfs"], base, image)
if err != nil {
return fmt.Errorf("Error writing kernel+squashfs output: %v", err)
}
return nil
},
"aws": func(base string, image io.Reader, size int) error {
filename := base + ".raw"
log.Infof(" %s", filename)
kernel, initrd, cmdline, _, err := tarToInitrd(image)
if err != nil {
return fmt.Errorf("Error converting to initrd: %v", err)
}
err = outputLinuxKit("raw", filename, kernel, initrd, cmdline, size)
if err != nil {
return fmt.Errorf("Error writing raw output: %v", err)
}
return nil
},
"gcp": func(base string, image io.Reader, size int) error {
kernel, initrd, cmdline, _, err := tarToInitrd(image)
if err != nil {
return fmt.Errorf("Error converting to initrd: %v", err)
}
err = outputImg(outputImages["gcp"], base+".img.tar.gz", kernel, initrd, cmdline)
if err != nil {
return fmt.Errorf("Error writing gcp output: %v", err)
}
return nil
},
"qcow2-efi": func(base string, image io.Reader, size int) error {
kernel, initrd, cmdline, _, err := tarToInitrd(image)
if err != nil {
return fmt.Errorf("Error converting to initrd: %v", err)
}
err = outputImg(outputImages["qcow2-efi"], base+"-efi.qcow2", kernel, initrd, cmdline)
if err != nil {
return fmt.Errorf("Error writing qcow2 EFI output: %v", err)
}
return nil
},
"qcow2-bios": func(base string, image io.Reader, size int) error {
filename := base + ".qcow2"
log.Infof(" %s", filename)
kernel, initrd, cmdline, _, err := tarToInitrd(image)
if err != nil {
return fmt.Errorf("Error converting to initrd: %v", err)
}
// TODO: Handle ucode
err = outputLinuxKit("qcow2", filename, kernel, initrd, cmdline, size)
if err != nil {
return fmt.Errorf("Error writing qcow2 output: %v", err)
}
return nil
},
"vhd": func(base string, image io.Reader, size int) error {
kernel, initrd, cmdline, _, err := tarToInitrd(image)
if err != nil {
return fmt.Errorf("Error converting to initrd: %v", err)
}
err = outputImg(outputImages["vhd"], base+".vhd", kernel, initrd, cmdline)
if err != nil {
return fmt.Errorf("Error writing vhd output: %v", err)
}
return nil
},
"dynamic-vhd": func(base string, image io.Reader, size int) error {
kernel, initrd, cmdline, _, err := tarToInitrd(image)
if err != nil {
return fmt.Errorf("Error converting to initrd: %v", err)
}
err = outputImg(outputImages["dynamic-vhd"], base+".vhd", kernel, initrd, cmdline)
if err != nil {
return fmt.Errorf("Error writing vhd output: %v", err)
}
return nil
},
"vmdk": func(base string, image io.Reader, size int) error {
kernel, initrd, cmdline, _, err := tarToInitrd(image)
if err != nil {
return fmt.Errorf("Error converting to initrd: %v", err)
}
err = outputImg(outputImages["vmdk"], base+".vmdk", kernel, initrd, cmdline)
if err != nil {
return fmt.Errorf("Error writing vmdk output: %v", err)
}
return nil
},
"rpi3": func(base string, image io.Reader, size int) error {
if runtime.GOARCH != "arm64" {
return fmt.Errorf("Raspberry Pi output currently only supported on arm64")
}
err := outputRPi3(outputImages["rpi3"], base+".tar", image)
if err != nil {
return fmt.Errorf("Error writing rpi3 output: %v", err)
}
return nil
},
}
var prereq = map[string]string{
"aws": "mkimage",
"qcow2-bios": "mkimage",
}
func ensurePrereq(out string) error {
var err error
p := prereq[out]
if p != "" {
err = ensureLinuxkitImage(p)
}
return err
}
// ValidateFormats checks if the format type is known
func ValidateFormats(formats []string) error {
log.Debugf("validating output: %v", formats)
for _, o := range formats {
f := outFuns[o]
if f == nil {
return fmt.Errorf("Unknown format type %s", o)
}
err := ensurePrereq(o)
if err != nil {
return fmt.Errorf("Failed to set up format type %s: %v", o, err)
}
}
return nil
}
// Formats generates all the specified output formats
func Formats(base string, image string, formats []string, size int) error {
log.Debugf("format: %v %s", formats, base)
err := ValidateFormats(formats)
if err != nil {
return err
}
for _, o := range formats {
ir, err := os.Open(image)
if err != nil {
return err
}
defer ir.Close()
f := outFuns[o]
if err := f(base, ir, size); err != nil {
return err
}
}
return nil
}
func tarToInitrd(r io.Reader) ([]byte, []byte, string, []byte, error) {
w := new(bytes.Buffer)
iw := initrd.NewWriter(w)
tr := tar.NewReader(r)
kernel, cmdline, ucode, err := initrd.CopySplitTar(iw, tr)
if err != nil {
return []byte{}, []byte{}, "", []byte{}, err
}
iw.Close()
return kernel, w.Bytes(), cmdline, ucode, nil
}
func tarInitrdKernel(kernel, initrd []byte, cmdline string) (*bytes.Buffer, error) {
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
hdr := &tar.Header{
Name: "kernel",
Mode: 0600,
Size: int64(len(kernel)),
}
err := tw.WriteHeader(hdr)
if err != nil {
return buf, err
}
_, err = tw.Write(kernel)
if err != nil {
return buf, err
}
hdr = &tar.Header{
Name: "initrd.img",
Mode: 0600,
Size: int64(len(initrd)),
}
err = tw.WriteHeader(hdr)
if err != nil {
return buf, err
}
_, err = tw.Write(initrd)
if err != nil {
return buf, err
}
hdr = &tar.Header{
Name: "cmdline",
Mode: 0600,
Size: int64(len(cmdline)),
}
err = tw.WriteHeader(hdr)
if err != nil {
return buf, err
}
_, err = tw.Write([]byte(cmdline))
if err != nil {
return buf, err
}
return buf, tw.Close()
}
func outputImg(image, filename string, kernel []byte, initrd []byte, cmdline string) error {
log.Debugf("output img: %s %s", image, filename)
log.Infof(" %s", filename)
buf, err := tarInitrdKernel(kernel, initrd, cmdline)
if err != nil {
return err
}
output, err := os.Create(filename)
if err != nil {
return err
}
defer output.Close()
return dockerRun(buf, output, true, image, cmdline)
}
func outputIso(image, filename string, filesystem io.Reader) error {
log.Debugf("output ISO: %s %s", image, filename)
log.Infof(" %s", filename)
output, err := os.Create(filename)
if err != nil {
return err
}
defer output.Close()
return dockerRun(filesystem, output, true, image)
}
func outputRPi3(image, filename string, filesystem io.Reader) error {
log.Debugf("output RPi3: %s %s", image, filename)
log.Infof(" %s", filename)
output, err := os.Create(filename)
if err != nil {
return err
}
defer output.Close()
return dockerRun(filesystem, output, true, image)
}
func outputKernelInitrd(base string, kernel []byte, initrd []byte, cmdline string, ucode []byte) error {
log.Debugf("output kernel/initrd: %s %s", base, cmdline)
if len(ucode) != 0 {
log.Infof(" %s ucode+%s %s", base+"-kernel", base+"-initrd.img", base+"-cmdline")
if err := ioutil.WriteFile(base+"-initrd.img", ucode, os.FileMode(0644)); err != nil {
return err
}
f, err := os.OpenFile(base+"-initrd.img", os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
return err
}
defer f.Close()
if _, err = f.Write(initrd); err != nil {
return err
}
} else {
log.Infof(" %s %s %s", base+"-kernel", base+"-initrd.img", base+"-cmdline")
if err := ioutil.WriteFile(base+"-initrd.img", initrd, os.FileMode(0644)); err != nil {
return err
}
}
if err := ioutil.WriteFile(base+"-kernel", kernel, os.FileMode(0644)); err != nil {
return err
}
return ioutil.WriteFile(base+"-cmdline", []byte(cmdline), os.FileMode(0644))
}
func outputKernelInitrdTarball(base string, kernel []byte, initrd []byte, cmdline string, ucode []byte) error {
log.Debugf("output kernel/initrd tarball: %s %s", base, cmdline)
log.Infof(" %s", base+"-initrd.tar")
f, err := os.Create(base + "-initrd.tar")
if err != nil {
return err
}
defer f.Close()
tw := tar.NewWriter(f)
hdr := &tar.Header{
Name: "kernel",
Mode: 0644,
Size: int64(len(kernel)),
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
if _, err := tw.Write(kernel); err != nil {
return err
}
hdr = &tar.Header{
Name: "initrd.img",
Mode: 0644,
Size: int64(len(initrd)),
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
if _, err := tw.Write(initrd); err != nil {
return err
}
hdr = &tar.Header{
Name: "cmdline",
Mode: 0644,
Size: int64(len(cmdline)),
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
if _, err := tw.Write([]byte(cmdline)); err != nil {
return err
}
if len(ucode) != 0 {
hdr := &tar.Header{
Name: "ucode.cpio",
Mode: 0644,
Size: int64(len(ucode)),
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
if _, err := tw.Write(ucode); err != nil {
return err
}
}
return tw.Close()
}
func outputKernelSquashFS(image, base string, filesystem io.Reader) error {
log.Debugf("output kernel/squashfs: %s %s", image, base)
log.Infof(" %s-squashfs.img", base)
tr := tar.NewReader(filesystem)
buf := new(bytes.Buffer)
rootfs := tar.NewWriter(buf)
for {
var thdr *tar.Header
thdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
switch {
case thdr.Name == "boot/kernel":
kernel, err := ioutil.ReadAll(tr)
if err != nil {
return err
}
if err := ioutil.WriteFile(base+"-kernel", kernel, os.FileMode(0644)); err != nil {
return err
}
case thdr.Name == "boot/cmdline":
cmdline, err := ioutil.ReadAll(tr)
if err != nil {
return err
}
if err := ioutil.WriteFile(base+"-cmdline", cmdline, os.FileMode(0644)); err != nil {
return err
}
case strings.HasPrefix(thdr.Name, "boot/"):
// skip the rest of boot/
default:
if err := rootfs.WriteHeader(thdr); err != nil {
return err
}
if _, err := io.Copy(rootfs, tr); err != nil {
return err
}
}
}
if err := rootfs.Close(); err != nil {
return err
}
output, err := os.Create(base + "-squashfs.img")
if err != nil {
return err
}
defer output.Close()
return dockerRun(buf, output, true, image)
}

View File

@@ -1,24 +0,0 @@
github.com/agl/ed25519 278e1ec8e8a6e017cd07577924d6766039146ced
github.com/containerd/containerd v1.0.0
github.com/docker/distribution 3800056b8832cf6075e78b282ac010131d8687bc
github.com/docker/docker ba99c19b593bdb9e7b90793681fe89b0a91781ed
github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06
github.com/docker/go-connections v0.3.0
github.com/docker/go-units v0.3.1
github.com/gogo/protobuf v0.5
github.com/gorilla/mux v1.6.1
github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runtime-spec v1.0.0
github.com/pkg/errors v0.8.0
github.com/sirupsen/logrus v1.0.3
github.com/surma/gocpio fcb68777e7dc4ea43ffce871b552c0d073c17495
github.com/theupdateframework/notary v0.6.1
github.com/xeipuuv/gojsonpointer 6fe8760cad3569743d51ddbb243b26f8456742dc
github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45
github.com/xeipuuv/gojsonschema 702b404897d4364af44dc8dcabc9815947942325
golang.org/x/crypto 573951cbe80bb6352881271bb276f48749eab6f4
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
gopkg.in/yaml.v2 v2.2.1
github.com/Microsoft/go-winio v0.4.1

File diff suppressed because it is too large

View File

@@ -1,193 +0,0 @@
package moby
// We want to replace much of this with use of containerd tools,
// and to use the Docker API rather than shelling out.
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strings"
"github.com/containerd/containerd/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
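// dockerRun shells out to the docker CLI: it pulls img first (with DOCKER_CONTENT_TRUST when trust is set),
// then runs it with --network=none and --rm, streaming input to its stdin and its stdout to output.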
func dockerRun(input io.Reader, output io.Writer, trust bool, img string, args ...string) error {
log.Debugf("docker run %s (trust=%t) (input): %s", img, trust, strings.Join(args, " "))
docker, err := exec.LookPath("docker")
if err != nil {
return errors.New("Docker does not seem to be installed")
}
env := os.Environ()
if trust {
env = append(env, "DOCKER_CONTENT_TRUST=1")
}
// Pull first to avoid https://github.com/docker/cli/issues/631
pull := exec.Command(docker, "pull", img)
pull.Env = env
if err := pull.Run(); err != nil {
if exitError, ok := err.(*exec.ExitError); ok {
return fmt.Errorf("docker pull %s failed: %v output:\n%s", img, err, exitError.Stderr)
}
return err
}
args = append([]string{"run", "--network=none", "--rm", "-i", img}, args...)
cmd := exec.Command(docker, args...)
cmd.Stdin = input
cmd.Stdout = output
cmd.Env = env
if err := cmd.Run(); err != nil {
if exitError, ok := err.(*exec.ExitError); ok {
return fmt.Errorf("docker run %s failed: %v output:\n%s", img, err, exitError.Stderr)
}
return err
}
log.Debugf("docker run %s (input): %s...Done", img, strings.Join(args, " "))
return nil
}
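// dockerCreate creates (but never starts) a container from image so that its filesystem can be exported.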
func dockerCreate(image string) (string, error) {
log.Debugf("docker create: %s", image)
cli, err := dockerClient()
if err != nil {
return "", errors.New("could not initialize Docker API client")
}
// we do not ever run the container, so /dev/null is used as command
config := &container.Config{
Cmd: []string{"/dev/null"},
Image: image,
}
respBody, err := cli.ContainerCreate(context.Background(), config, nil, nil, "")
if err != nil {
return "", err
}
log.Debugf("docker create: %s...Done", image)
return respBody.ID, nil
}
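// dockerExport returns a tar stream of the container's filesystem; the caller must close it.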
func dockerExport(container string) (io.ReadCloser, error) {
log.Debugf("docker export: %s", container)
cli, err := dockerClient()
if err != nil {
return nil, errors.New("could not initialize Docker API client")
}
responseBody, err := cli.ContainerExport(context.Background(), container)
if err != nil {
return nil, err
}
return responseBody, err
}
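// dockerRm removes the given container.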
func dockerRm(container string) error {
log.Debugf("docker rm: %s", container)
cli, err := dockerClient()
if err != nil {
return errors.New("could not initialize Docker API client")
}
if err = cli.ContainerRemove(context.Background(), container, types.ContainerRemoveOptions{}); err != nil {
return err
}
log.Debugf("docker rm: %s...Done", container)
return nil
}
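// dockerPull pulls ref via the Docker API. When trustedPull is set the reference is first resolved to a signed
// digest via notary, and the pull is skipped if the trusted image is already cached unless forcePull is set.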
func dockerPull(ref *reference.Spec, forcePull, trustedPull bool) error {
log.Debugf("docker pull: %s", ref)
cli, err := dockerClient()
if err != nil {
return errors.New("could not initialize Docker API client")
}
if trustedPull {
log.Debugf("pulling %s with content trust", ref)
trustedImg, err := TrustedReference(ref.String())
if err != nil {
return fmt.Errorf("Trusted pull for %s failed: %v", ref, err)
}
// tag the image on a best-effort basis after pulling with content trust,
// ensuring that docker picks up the tag and digest from the canonical format
defer func(src, dst string) {
if err := cli.ImageTag(context.Background(), src, dst); err != nil {
log.Debugf("could not tag trusted image %s to %s", src, dst)
}
}(trustedImg.String(), ref.String())
log.Debugf("successfully verified trusted reference %s from notary", trustedImg.String())
trustedSpec, err := reference.Parse(trustedImg.String())
if err != nil {
return fmt.Errorf("failed to convert trusted img %s to Spec: %v", trustedImg, err)
}
ref.Locator = trustedSpec.Locator
ref.Object = trustedSpec.Object
imageSearchArg := filters.NewArgs()
imageSearchArg.Add("reference", trustedImg.String())
if _, err := cli.ImageList(context.Background(), types.ImageListOptions{Filters: imageSearchArg}); err == nil && !forcePull {
log.Debugf("docker pull: trusted image %s already cached...Done", trustedImg.String())
return nil
}
}
log.Infof("Pull image: %s", ref)
r, err := cli.ImagePull(context.Background(), ref.String(), types.ImagePullOptions{})
if err != nil {
return err
}
defer r.Close()
_, err = io.Copy(ioutil.Discard, r)
if err != nil {
return err
}
log.Debugf("docker pull: %s...Done", ref)
return nil
}
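// dockerClient returns a Docker API client from the environment, pinned to API version 1.23 for compatibility.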
func dockerClient() (*client.Client, error) {
// for maximum compatibility as we use nothing new
err := os.Setenv("DOCKER_API_VERSION", "1.23")
if err != nil {
return nil, err
}
return client.NewEnvClient()
}
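// dockerInspectImage inspects ref, pulling it first (with trust if requested) if it is not present locally.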
func dockerInspectImage(cli *client.Client, ref *reference.Spec, trustedPull bool) (types.ImageInspect, error) {
log.Debugf("docker inspect image: %s", ref)
inspect, _, err := cli.ImageInspectWithRaw(context.Background(), ref.String())
if err != nil {
if client.IsErrNotFound(err) {
pullErr := dockerPull(ref, true, trustedPull)
if pullErr != nil {
return types.ImageInspect{}, pullErr
}
inspect, _, err = cli.ImageInspectWithRaw(context.Background(), ref.String())
if err != nil {
return types.ImageInspect{}, err
}
} else {
return types.ImageInspect{}, err
}
}
log.Debugf("docker inspect image: %s...Done", ref)
return inspect, nil
}

View File

@@ -1,142 +0,0 @@
package moby
import (
"crypto/sha256"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
log "github.com/sirupsen/logrus"
)
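// linuxkitYaml holds the embedded YAML for helper images used to generate output formats; the mkimage image
// boots, writes a bootable disk from the supplied kernel and initrd, and then powers off.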
var linuxkitYaml = map[string]string{"mkimage": `
kernel:
  image: linuxkit/kernel:4.9.39
  cmdline: "console=ttyS0"
init:
- linuxkit/init:00ab58c9681a0bf42b2e35134c1ccf1591ebb64d
- linuxkit/runc:f5960b83a8766ae083efc744fa63dbf877450e4f
onboot:
- name: mkimage
  image: linuxkit/mkimage:50bde8b00eb82e08f12dd9cc29f36c77f5638426
- name: poweroff
  image: linuxkit/poweroff:3845c4d64d47a1ea367806be5547e44594b0fa91
trust:
  org:
  - linuxkit
`}
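// imageFilename returns the cache path for the named helper image; the name is suffixed with the sha256 of its
// YAML so that changing the YAML invalidates the cached build.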
func imageFilename(name string) string {
yaml := linuxkitYaml[name]
hash := sha256.Sum256([]byte(yaml))
return filepath.Join(MobyDir, "linuxkit", name+"-"+fmt.Sprintf("%x", hash))
}
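// ensureLinuxkitImage builds the named helper image and caches its kernel, initrd and cmdline under MobyDir
// if they are not already present.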
func ensureLinuxkitImage(name string) error {
filename := imageFilename(name)
_, err1 := os.Stat(filename + "-kernel")
_, err2 := os.Stat(filename + "-initrd.img")
_, err3 := os.Stat(filename + "-cmdline")
if err1 == nil && err2 == nil && err3 == nil {
return nil
}
err := os.MkdirAll(filepath.Join(MobyDir, "linuxkit"), 0755)
if err != nil {
return err
}
// TODO clean up old files
log.Infof("Building LinuxKit image %s to generate output formats", name)
yaml := linuxkitYaml[name]
m, err := NewConfig([]byte(yaml))
if err != nil {
return err
}
// TODO pass through --pull to here
tf, err := ioutil.TempFile("", "")
if err != nil {
return err
}
defer os.Remove(tf.Name())
if err := Build(m, tf, false, ""); err != nil {
return err
}
if err := tf.Close(); err != nil {
return err
}
image, err := os.Open(tf.Name())
if err != nil {
return err
}
defer image.Close()
kernel, initrd, cmdline, _, err := tarToInitrd(image)
if err != nil {
return fmt.Errorf("Error converting to initrd: %v", err)
}
return writeKernelInitrd(filename, kernel, initrd, cmdline)
}
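// writeKernelInitrd writes the kernel, initrd and cmdline of a helper image to the cache.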
func writeKernelInitrd(filename string, kernel []byte, initrd []byte, cmdline string) error {
err := ioutil.WriteFile(filename+"-kernel", kernel, 0600)
if err != nil {
return err
}
err = ioutil.WriteFile(filename+"-initrd.img", initrd, 0600)
if err != nil {
return err
}
return ioutil.WriteFile(filename+"-cmdline", []byte(cmdline), 0600)
}
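// outputLinuxKit builds a disk image in the given format: it packs the kernel, initrd and cmdline into a tar
// disk and boots the cached mkimage helper under `linuxkit run qemu` to write the final image to filename.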
func outputLinuxKit(format string, filename string, kernel []byte, initrd []byte, cmdline string, size int) error {
log.Debugf("output linuxkit generated img: %s %s size %d", format, filename, size)
tmp, err := ioutil.TempDir(filepath.Join(MobyDir, "tmp"), "moby")
if err != nil {
return err
}
defer os.RemoveAll(tmp)
buf, err := tarInitrdKernel(kernel, initrd, cmdline)
if err != nil {
return err
}
tardisk := filepath.Join(tmp, "tardisk")
f, err := os.Create(tardisk)
if err != nil {
return err
}
_, err = io.Copy(f, buf)
if err != nil {
return err
}
err = f.Close()
if err != nil {
return err
}
sizeString := fmt.Sprintf("%dM", size)
_ = os.Remove(filename)
_, err = os.Stat(filename)
if err == nil || !os.IsNotExist(err) {
return fmt.Errorf("Cannot remove existing file [%s]", filename)
}
linuxkit, err := exec.LookPath("linuxkit")
if err != nil {
return fmt.Errorf("Cannot find linuxkit executable, needed to build %s output type: %v", format, err)
}
commandLine := []string{
"-q", "run", "qemu",
"-disk", fmt.Sprintf("%s,size=%s,format=%s", filename, sizeString, format),
"-disk", fmt.Sprintf("%s,format=raw", tardisk),
"-kernel", imageFilename("mkimage"),
}
log.Debugf("run %s: %v", linuxkit, commandLine)
cmd := exec.Command(linuxkit, commandLine...)
cmd.Stderr = os.Stderr
return cmd.Run()
}

View File

@@ -1,313 +0,0 @@
package moby
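// schema is the JSON Schema (draft-04) that the YAML build configuration is validated against.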
var schema = string(`
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Moby Config",
"additionalProperties": false,
"definitions": {
"kernel": {
"type": "object",
"additionalProperties": false,
"properties": {
"image": {"type": "string"},
"cmdline": {"type": "string"},
"binary": {"type": "string"},
"tar": {"type": "string"},
"ucode": {"type": "string"}
}
},
"file": {
"type": "object",
"additionalProperties": false,
"properties": {
"path": {"type": "string"},
"directory": {"type": "boolean"},
"symlink": {"type": "string"},
"contents": {"type": "string"},
"source": {"type": "string"},
"metadata": {"type": "string"},
"optional": {"type": "boolean"},
"mode": {"type": "string"},
"uid": {"anyOf": [{"type": "string"}, {"type": "integer"}]},
"gid": {"anyOf": [{"type": "string"}, {"type": "integer"}]}
}
},
"files": {
"type": "array",
"items": { "$ref": "#/definitions/file" }
},
"trust": {
"type": "object",
"additionalProperties": false,
"properties": {
"image": { "$ref": "#/definitions/strings" },
"org": { "$ref": "#/definitions/strings" }
}
},
"strings": {
"type": "array",
"items": {"type": "string"}
},
"mapstring": {
"type": "object",
"additionalProperties": {"type": "string"}
},
"mount": {
"type": "object",
"additionalProperties": false,
"properties": {
"destination": { "type": "string" },
"type": { "type": "string" },
"source": { "type": "string" },
"options": { "$ref": "#/definitions/strings" }
}
},
"mounts": {
"type": "array",
"items": { "$ref": "#/definitions/mount" }
},
"idmapping": {
"type": "object",
"additionalProperties": false,
"properties": {
"hostID": { "type": "integer" },
"containerID": { "type": "integer" },
"size": { "type": "integer" }
}
},
"idmappings": {
"type": "array",
"items": { "$ref": "#/definitions/idmapping" }
},
"devicecgroups": {
"type": "array",
"items": { "$ref": "#/definitions/devicecgroup" }
},
"devicecgroup": {
"type": "object",
"additionalProperties": false,
"properties": {
"allow": {"type": "boolean"},
"type": {"type": "string"},
"major": {"type": "integer"},
"minor": {"type": "integer"},
"access": {"type": "string"}
}
},
"memory": {
"type": "object",
"additionalProperties": false,
"properties": {
"limit": {"type": "integer"},
"reservation": {"type": "integer"},
"swap": {"type": "integer"},
"kernel": {"type": "integer"},
"kernelTCP": {"type": "integer"},
"swappiness": {"type": "integer"},
"disableOOMKiller": {"type": "boolean"}
}
},
"cpu": {
"type": "object",
"additionalProperties": false,
"properties": {
"shares": {"type": "integer"},
"quota": {"type": "integer"},
"period": {"type": "integer"},
"realtimeRuntime": {"type": "integer"},
"realtimePeriod": {"type": "integer"},
"cpus": {"type": "string"},
"mems": {"type": "string"}
}
},
"pids": {
"type": "object",
"additionalProperties": false,
"properties": {
"limit": {"type": "integer"}
}
},
"weightdevices": {
"type": "array",
"items": {"$ref": "#/definitions/weightdevice"}
},
"weightdevice": {
"type": "object",
"additionalProperties": false,
"properties": {
"major": {"type": "integer"},
"minor": {"type": "integer"},
"weight": {"type": "integer"},
"leafWeight": {"type": "integer"}
}
},
"throttledevices": {
"type": "array",
"items": {"$ref": "#/definitions/throttledevice"}
},
"throttledevice": {
"type": "object",
"additionalProperties": false,
"properties": {
"major": {"type": "integer"},
"minor": {"type": "integer"},
"rate": {"type": "integer"}
}
},
"blockio": {
"type": "object",
"additionalProperties": false,
"properties": {
"weight": {"type": "integer"},
"leafWeight": {"type": "integer"},
"weightDevice": {"$ref": "#/definitions/weightdevices"},
"throttleReadBpsDevice": {"$ref": "#/definitions/throttledevices"},
"throttleWriteBpsDevice": {"$ref": "#/definitions/throttledevices"},
"throttleReadIOPSDevice": {"$ref": "#/definitions/throttledevices"},
"throttleWriteIOPSDevice": {"$ref": "#/definitions/throttledevices"}
}
},
"hugepagelimits": {
"type": "array",
"items": {"$ref": "#/definitions/hugepagelimit"}
},
"hugepagelimit": {
"type": "object",
"additionalProperties": false,
"properties": {
"pageSize": {"type": "integer"},
"limit": {"type": "integer"}
}
},
"interfacepriorities": {
"type": "array",
"items": {"$ref": "#/definitions/interfacepriority"}
},
"interfacepriority": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {"type": "string"},
"priority": {"type": "integer"}
}
},
"network": {
"type": "object",
"additionalProperties": false,
"properties": {
"classID": {"type": "integer"},
"priorities": {"$ref": "#/definitions/interfacepriorities"}
}
},
"resources": {
"type": "object",
"additionalProperties": false,
"properties": {
"devices": {"$ref": "#/definitions/devicecgroups"},
"memory": {"$ref": "#/definitions/memory"},
"cpu": {"$ref": "#/definitions/cpu"},
"pids": {"$ref": "#/definitions/pids"},
"blockio": {"$ref": "#/definitions/blockio"},
"hugepageLimits": {"$ref": "#/definitions/hugepagelimits"},
"network": {"$ref": "#/definitions/network"}
}
},
"interfaces": {
"type": "array",
"items": {"$ref": "#/definitions/interface"}
},
"interface": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {"type": "string"},
"add": {"type": "string"},
"peer": {"type": "string"},
"createInRoot": {"type": "boolean"}
}
},
"namespaces": {
"type": "object",
"additionalProperties": false,
"properties": {
"cgroup": {"type": "string"},
"ipc": {"type": "string"},
"mnt": {"type": "string"},
"net": {"type": "string"},
"pid": {"type": "string"},
"user": {"type": "string"},
"uts": {"type": "string"}
}
},
"runtime": {
"type": "object",
"additionalProperties": false,
"properties": {
"cgroups": {"$ref": "#/definitions/strings"},
"mounts": {"$ref": "#/definitions/mounts"},
"mkdir": {"$ref": "#/definitions/strings"},
"interfaces": {"$ref": "#/definitions/interfaces"},
"bindNS": {"$ref": "#/definitions/namespaces"},
"namespace": {"type": "string"}
}
},
"image": {
"type": "object",
"additionalProperties": false,
"required": ["name", "image"],
"properties": {
"name": {"type": "string"},
"image": {"type": "string"},
"capabilities": { "$ref": "#/definitions/strings" },
"ambient": { "$ref": "#/definitions/strings" },
"mounts": { "$ref": "#/definitions/mounts" },
"binds": { "$ref": "#/definitions/strings" },
"tmpfs": { "$ref": "#/definitions/strings" },
"command": { "$ref": "#/definitions/strings" },
"env": { "$ref": "#/definitions/strings" },
"cwd": { "type": "string"},
"net": { "type": "string"},
"pid": { "type": "string"},
"ipc": { "type": "string"},
"uts": { "type": "string"},
"userns": { "type": "string"},
"readonly": { "type": "boolean"},
"maskedPaths": { "$ref": "#/definitions/strings" },
"readonlyPaths": { "$ref": "#/definitions/strings" },
"uid": {"anyOf": [{"type": "string"}, {"type": "integer"}]},
"gid": {"anyOf": [{"type": "string"}, {"type": "integer"}]},
"additionalGids": {
"type": "array",
"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}
},
"noNewPrivileges": {"type": "boolean"},
"hostname": {"type": "string"},
"oomScoreAdj": {"type": "integer"},
"rootfsPropagation": {"type": "string"},
"cgroupsPath": {"type": "string"},
"resources": {"$ref": "#/definitions/resources"},
"sysctl": { "$ref": "#/definitions/mapstring" },
"rlimits": { "$ref": "#/definitions/strings" },
"uidMappings": { "$ref": "#/definitions/idmappings" },
"gidMappings": { "$ref": "#/definitions/idmappings" },
"annotations": { "$ref": "#/definitions/mapstring" },
"runtime": {"$ref": "#/definitions/runtime"}
}
},
"images": {
"type": "array",
"items": { "$ref": "#/definitions/image" }
}
},
"properties": {
"kernel": { "$ref": "#/definitions/kernel" },
"init": { "$ref": "#/definitions/strings" },
"onboot": { "$ref": "#/definitions/images" },
"onshutdown": { "$ref": "#/definitions/images" },
"services": { "$ref": "#/definitions/images" },
"trust": { "$ref": "#/definitions/trust" },
"files": { "$ref": "#/definitions/files" }
}
}
`)

View File

@@ -1,207 +0,0 @@
package moby
import (
"crypto/tls"
"crypto/x509"
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"path"
"path/filepath"
"strings"
"time"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/client/auth"
"github.com/docker/distribution/registry/client/auth/challenge"
"github.com/docker/distribution/registry/client/transport"
"github.com/opencontainers/go-digest"
log "github.com/sirupsen/logrus"
notaryClient "github.com/theupdateframework/notary/client"
"github.com/theupdateframework/notary/trustpinning"
"github.com/theupdateframework/notary/tuf/data"
)
var (
// ReleasesRole is the role named "releases"
ReleasesRole = data.RoleName(path.Join(data.CanonicalTargetsRole.String(), "releases"))
)
// TrustedReference parses an image string, and does a notary lookup to verify and retrieve the signed digest reference
func TrustedReference(image string) (reference.Reference, error) {
ref, err := reference.ParseAnyReference(image)
if err != nil {
return nil, err
}
// to mimic docker pull: if we have a digest already, it's implicitly trusted
if digestRef, ok := ref.(reference.Digested); ok {
return digestRef, nil
}
// likewise, a canonical reference already includes a digest and is implicitly trusted
if canonicalRef, ok := ref.(reference.Canonical); ok {
return canonicalRef, nil
}
namedRef, ok := ref.(reference.Named)
if !ok {
return nil, errors.New("failed to resolve image digest using content trust: reference is not named")
}
taggedRef, ok := namedRef.(reference.NamedTagged)
if !ok {
return nil, errors.New("failed to resolve image digest using content trust: reference is not tagged")
}
gun := taggedRef.Name()
targetName := taggedRef.Tag()
server, err := getTrustServer(gun)
if err != nil {
return nil, err
}
rt, err := GetReadOnlyAuthTransport(server, []string{gun}, "", "", "")
if err != nil {
log.Debugf("failed to reach %s notary server for repo: %s, falling back to cache: %v", server, gun, err)
rt = nil
}
nRepo, err := notaryClient.NewFileCachedRepository(
trustDirectory(),
data.GUN(gun),
server,
rt,
nil,
trustpinning.TrustPinConfig{},
)
if err != nil {
return nil, err
}
target, err := nRepo.GetTargetByName(targetName, ReleasesRole, data.CanonicalTargetsRole)
if err != nil {
return nil, err
}
// Only get the tag if it's in the top level targets role or the releases delegation role
// ignore it if it's in any other delegation roles
if target.Role != ReleasesRole && target.Role != data.CanonicalTargetsRole {
return nil, errors.New("not signed in valid role")
}
h, ok := target.Hashes["sha256"]
if !ok {
return nil, errors.New("no valid hash, expecting sha256")
}
dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h))
// Allow returning canonical reference with tag and digest
return reference.WithDigest(taggedRef, dgst)
}
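// getTrustServer returns the notary server for a GUN; only Docker Hub (notary.docker.io) is currently supported.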
func getTrustServer(gun string) (string, error) {
if strings.HasPrefix(gun, "docker.io/") {
return "https://notary.docker.io", nil
}
return "", errors.New("non-hub images not yet supported")
}
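// trustDirectory returns the local notary trust cache, under MobyDir.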
func trustDirectory() string {
return filepath.Join(MobyDir, "trust")
}
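// credentialStore is a minimal credential store for registry token auth; it serves a fixed username/password
// and caches refresh tokens per service.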
type credentialStore struct {
username string
password string
refreshTokens map[string]string
}
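// Basic returns the fixed username and password for basic auth.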
func (tcs *credentialStore) Basic(url *url.URL) (string, string) {
return tcs.username, tcs.password
}
// refresh tokens are the long lived tokens that can be used instead of a password
func (tcs *credentialStore) RefreshToken(u *url.URL, service string) string {
return tcs.refreshTokens[service]
}
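// SetRefreshToken caches a refresh token for the given service.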
func (tcs *credentialStore) SetRefreshToken(u *url.URL, service string, token string) {
if tcs.refreshTokens != nil {
tcs.refreshTokens[service] = token
}
}
// GetReadOnlyAuthTransport gets the Auth Transport used to communicate with notary
func GetReadOnlyAuthTransport(server string, scopes []string, username, password, rootCAPath string) (http.RoundTripper, error) {
httpsTransport, err := httpsTransport(rootCAPath)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", fmt.Sprintf("%s/v2/", server), nil)
if err != nil {
return nil, err
}
pingClient := &http.Client{
Transport: httpsTransport,
Timeout: 5 * time.Second,
}
resp, err := pingClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
challengeManager := challenge.NewSimpleManager()
if err := challengeManager.AddResponse(resp); err != nil {
return nil, err
}
creds := credentialStore{
username: username,
password: password,
refreshTokens: make(map[string]string),
}
var scopeObjs []auth.Scope
for _, scopeName := range scopes {
scopeObjs = append(scopeObjs, auth.RepositoryScope{
Repository: scopeName,
Actions: []string{"pull"},
})
}
// allow setting multiple scopes so we don't have to reauth
tokenHandler := auth.NewTokenHandlerWithOptions(auth.TokenHandlerOptions{
Transport: httpsTransport,
Credentials: &creds,
Scopes: scopeObjs,
})
authedTransport := transport.NewTransport(httpsTransport, auth.NewAuthorizer(challengeManager, tokenHandler))
return authedTransport, nil
}
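// httpsTransport returns an HTTP transport with dial and TLS handshake timeouts, using the system roots
// unless caFile supplies a custom CA pool.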
func httpsTransport(caFile string) (*http.Transport, error) {
tlsConfig := &tls.Config{}
transport := http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlsConfig,
}
// Override the system cert pool when a custom CA file is provided
if caFile != "" {
certPool := x509.NewCertPool()
pems, err := ioutil.ReadFile(caFile)
if err != nil {
return nil, err
}
if !certPool.AppendCertsFromPEM(pems) {
return nil, fmt.Errorf("could not parse any CA certificates from %s", caFile)
}
transport.TLSClientConfig.RootCAs = certPool
}
return &transport, nil
}

View File

@@ -1,16 +0,0 @@
package moby
import (
"path/filepath"
)
var (
// MobyDir is the location of the cache directory, defaults to ~/.moby
MobyDir string
)
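// defaultMobyConfigDir returns the default location of the cache directory, ~/.moby.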
func defaultMobyConfigDir() string {
mobyDefaultDir := ".moby"
home := homeDir()
return filepath.Join(home, mobyDefaultDir)
}

View File

@@ -1,11 +0,0 @@
// +build !windows

package moby
import (
"os"
)
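// homeDir returns the user's home directory from $HOME.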
func homeDir() string {
return os.Getenv("HOME")
}

View File

@@ -1,9 +0,0 @@
package moby
import (
"os"
)
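// homeDir returns the user's home directory from %USERPROFILE%.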
func homeDir() string {
return os.Getenv("USERPROFILE")
}

View File

@@ -1,46 +0,0 @@
package pad4
import (
"bytes"
"io"
)
// A Writer is an io.WriteCloser. On Close, the output is zero padded to a 4 byte boundary
type Writer struct {
w io.Writer
count int
}
// Write writes output
func (pad *Writer) Write(p []byte) (int, error) {
n, err := pad.w.Write(p)
if err != nil {
return n, err
}
pad.count += n
return n, nil
}
// Close adds the padding
func (pad *Writer) Close() error {
mod4 := pad.count & 3
if mod4 == 0 {
return nil
}
zero := make([]byte, 4-mod4)
buf := bytes.NewBuffer(zero)
n, err := io.Copy(pad.w, buf)
if err != nil {
return err
}
pad.count += int(n)
return nil
}
// NewWriter provides a new io.WriteCloser that zero pads the
// output to a multiple of four bytes
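// For example, to zero pad a stream to a 4 byte boundary (a common requirement when
// concatenating cpio archives), roughly:
//
//	pw := NewWriter(out)
//	_, err := io.Copy(pw, in)
//	if err == nil {
//		err = pw.Close()
//	}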
func NewWriter(w io.Writer) *Writer {
pad := new(Writer)
pad.w = w
return pad
}