diff --git a/go.mod b/go.mod index 97fce5e4..4d45e227 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc github.com/crillab/gophersat v1.3.2-0.20201023142334-3fc2ac466765 + github.com/docker/distribution v2.7.1+incompatible github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible github.com/docker/go-units v0.4.0 github.com/ecooper/qlearning v0.0.0-20160612200101-3075011a69fd @@ -36,6 +37,7 @@ require ( github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290 github.com/onsi/ginkgo v1.14.2 github.com/onsi/gomega v1.10.3 + github.com/opencontainers/image-spec v1.0.1 github.com/otiai10/copy v1.2.1-0.20200916181228-26f84a0b1578 github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f github.com/pkg/errors v0.9.1 @@ -47,9 +49,9 @@ require ( go.uber.org/atomic v1.5.1 // indirect go.uber.org/multierr v1.4.0 // indirect go.uber.org/zap v1.13.0 - golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 // indirect gopkg.in/yaml.v2 v2.3.0 helm.sh/helm/v3 v3.3.4 + ) replace github.com/docker/docker => github.com/Luet-lab/moby v17.12.0-ce-rc1.0.20200605210607-749178b8f80d+incompatible diff --git a/go.sum b/go.sum index 053167d8..811ddac0 100644 --- a/go.sum +++ b/go.sum @@ -23,6 +23,7 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AkihiroSuda/containerd-fuse-overlayfs v0.0.0-20200220082720-bb896865146c h1:2pWkaq3X2yFR5o5OI7QP0CYNNKtfE2ZCK3hMRaTkhmc= github.com/AkihiroSuda/containerd-fuse-overlayfs v0.0.0-20200220082720-bb896865146c/go.mod h1:K4kx7xAA5JimeQCnN+dbeLlfaBxzZLaLiDD8lusFI8w= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= diff --git a/vendor/github.com/containerd/console/.travis.yml b/vendor/github.com/containerd/console/.travis.yml deleted file mode 100644 index 16827ec3..00000000 --- a/vendor/github.com/containerd/console/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -language: go -go: - - "1.12.x" - - "1.13.x" - -go_import_path: github.com/containerd/console - -env: - - GO111MODULE=on - -install: - - pushd ..; go get -u github.com/vbatts/git-validation; popd - - pushd ..; go get -u github.com/kunalkushwaha/ltag; popd - -before_script: - - pushd ..; git clone https://github.com/containerd/project; popd - -script: - - DCO_VERBOSITY=-q ../project/script/validate/dco - - ../project/script/validate/fileheader ../project/ - - travis_wait ../project/script/validate/vendor - - go test -race - - GOOS=openbsd go build - - GOOS=openbsd go test -c - - GOOS=solaris go build - - GOOS=solaris go test -c - - GOOS=windows go test diff --git a/vendor/github.com/containerd/console/LICENSE b/vendor/github.com/containerd/console/LICENSE deleted file mode 100644 index 584149b6..00000000 --- a/vendor/github.com/containerd/console/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. 
Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md deleted file mode 100644 index 5392fdaf..00000000 --- a/vendor/github.com/containerd/console/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# console - -[![Build Status](https://travis-ci.org/containerd/console.svg?branch=master)](https://travis-ci.org/containerd/console) - -Golang package for dealing with consoles. Light on deps and a simple API. 
- -## Modifying the current process - -```go -current := console.Current() -defer current.Reset() - -if err := current.SetRaw(); err != nil { -} -ws, err := current.Size() -current.Resize(ws) -``` - -## Project details - -console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/console/console.go b/vendor/github.com/containerd/console/console.go deleted file mode 100644 index 6a36d147..00000000 --- a/vendor/github.com/containerd/console/console.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "errors" - "io" - "os" -) - -var ErrNotAConsole = errors.New("provided file is not a console") - -type File interface { - io.ReadWriteCloser - - // Fd returns its file descriptor - Fd() uintptr - // Name returns its file name - Name() string -} - -type Console interface { - File - - // Resize resizes the console to the provided window size - Resize(WinSize) error - // ResizeFrom resizes the calling console to the size of the - // provided console - ResizeFrom(Console) error - // SetRaw sets the console in raw mode - SetRaw() error - // DisableEcho disables echo on the console - DisableEcho() error - // Reset restores the console to its orignal state - Reset() error - // Size returns the window size of the console - Size() (WinSize, error) -} - -// WinSize specifies the window size of the console -type WinSize struct { - // Height of the console - Height uint16 - // Width of the console - Width uint16 - x uint16 - y uint16 -} - -// Current returns the current processes console -func Current() Console { - c, err := ConsoleFromFile(os.Stdin) - if err != nil { - // stdin should always be a console for the design - // of this function - panic(err) - } - return c -} - -// ConsoleFromFile returns a console using the provided file -func ConsoleFromFile(f File) (Console, error) { - if err := checkConsole(f); err != nil { - return nil, err - } - return newMaster(f) -} diff --git a/vendor/github.com/containerd/console/console_linux.go b/vendor/github.com/containerd/console/console_linux.go deleted file mode 100644 index c1c839ee..00000000 --- a/vendor/github.com/containerd/console/console_linux.go +++ /dev/null @@ -1,280 +0,0 @@ -// +build linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "io" - "os" - "sync" - - "golang.org/x/sys/unix" -) - -const ( - maxEvents = 128 -) - -// Epoller manages multiple epoll consoles using edge-triggered epoll api so we -// dont have to deal with repeated wake-up of EPOLLER or EPOLLHUP. -// For more details, see: -// - https://github.com/systemd/systemd/pull/4262 -// - https://github.com/moby/moby/issues/27202 -// -// Example usage of Epoller and EpollConsole can be as follow: -// -// epoller, _ := NewEpoller() -// epollConsole, _ := epoller.Add(console) -// go epoller.Wait() -// var ( -// b bytes.Buffer -// wg sync.WaitGroup -// ) -// wg.Add(1) -// go func() { -// io.Copy(&b, epollConsole) -// wg.Done() -// }() -// // perform I/O on the console -// epollConsole.Shutdown(epoller.CloseConsole) -// wg.Wait() -// epollConsole.Close() -type Epoller struct { - efd int - mu sync.Mutex - fdMapping map[int]*EpollConsole - closeOnce sync.Once -} - -// NewEpoller returns an instance of epoller with a valid epoll fd. -func NewEpoller() (*Epoller, error) { - efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if err != nil { - return nil, err - } - return &Epoller{ - efd: efd, - fdMapping: make(map[int]*EpollConsole), - }, nil -} - -// Add creates an epoll console based on the provided console. The console will -// be registered with EPOLLET (i.e. using edge-triggered notification) and its -// file descriptor will be set to non-blocking mode. After this, user should use -// the return console to perform I/O. -func (e *Epoller) Add(console Console) (*EpollConsole, error) { - sysfd := int(console.Fd()) - // Set sysfd to non-blocking mode - if err := unix.SetNonblock(sysfd, true); err != nil { - return nil, err - } - - ev := unix.EpollEvent{ - Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET, - Fd: int32(sysfd), - } - if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil { - return nil, err - } - ef := &EpollConsole{ - Console: console, - sysfd: sysfd, - readc: sync.NewCond(&sync.Mutex{}), - writec: sync.NewCond(&sync.Mutex{}), - } - e.mu.Lock() - e.fdMapping[sysfd] = ef - e.mu.Unlock() - return ef, nil -} - -// Wait starts the loop to wait for its consoles' notifications and signal -// appropriate console that it can perform I/O. 
-func (e *Epoller) Wait() error { - events := make([]unix.EpollEvent, maxEvents) - for { - n, err := unix.EpollWait(e.efd, events, -1) - if err != nil { - // EINTR: The call was interrupted by a signal handler before either - // any of the requested events occurred or the timeout expired - if err == unix.EINTR { - continue - } - return err - } - for i := 0; i < n; i++ { - ev := &events[i] - // the console is ready to be read from - if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 { - if epfile := e.getConsole(int(ev.Fd)); epfile != nil { - epfile.signalRead() - } - } - // the console is ready to be written to - if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 { - if epfile := e.getConsole(int(ev.Fd)); epfile != nil { - epfile.signalWrite() - } - } - } - } -} - -// CloseConsole unregisters the console's file descriptor from epoll interface -func (e *Epoller) CloseConsole(fd int) error { - e.mu.Lock() - defer e.mu.Unlock() - delete(e.fdMapping, fd) - return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{}) -} - -func (e *Epoller) getConsole(sysfd int) *EpollConsole { - e.mu.Lock() - f := e.fdMapping[sysfd] - e.mu.Unlock() - return f -} - -// Close closes the epoll fd -func (e *Epoller) Close() error { - closeErr := os.ErrClosed // default to "file already closed" - e.closeOnce.Do(func() { - closeErr = unix.Close(e.efd) - }) - return closeErr -} - -// EpollConsole acts like a console but registers its file descriptor with an -// epoll fd and uses epoll API to perform I/O. -type EpollConsole struct { - Console - readc *sync.Cond - writec *sync.Cond - sysfd int - closed bool -} - -// Read reads up to len(p) bytes into p. It returns the number of bytes read -// (0 <= n <= len(p)) and any error encountered. -// -// If the console's read returns EAGAIN or EIO, we assume that it's a -// temporary error because the other side went away and wait for the signal -// generated by epoll event to continue. -func (ec *EpollConsole) Read(p []byte) (n int, err error) { - var read int - ec.readc.L.Lock() - defer ec.readc.L.Unlock() - for { - read, err = ec.Console.Read(p[n:]) - n += read - if err != nil { - var hangup bool - if perr, ok := err.(*os.PathError); ok { - hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) - } else { - hangup = (err == unix.EAGAIN || err == unix.EIO) - } - // if the other end disappear, assume this is temporary and wait for the - // signal to continue again. Unless we didnt read anything and the - // console is already marked as closed then we should exit - if hangup && !(n == 0 && len(p) > 0 && ec.closed) { - ec.readc.Wait() - continue - } - } - break - } - // if we didnt read anything then return io.EOF to end gracefully - if n == 0 && len(p) > 0 && err == nil { - err = io.EOF - } - // signal for others that we finished the read - ec.readc.Signal() - return n, err -} - -// Writes len(p) bytes from p to the console. It returns the number of bytes -// written from p (0 <= n <= len(p)) and any error encountered that caused -// the write to stop early. -// -// If writes to the console returns EAGAIN or EIO, we assume that it's a -// temporary error because the other side went away and wait for the signal -// generated by epoll event to continue. 
-func (ec *EpollConsole) Write(p []byte) (n int, err error) { - var written int - ec.writec.L.Lock() - defer ec.writec.L.Unlock() - for { - written, err = ec.Console.Write(p[n:]) - n += written - if err != nil { - var hangup bool - if perr, ok := err.(*os.PathError); ok { - hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) - } else { - hangup = (err == unix.EAGAIN || err == unix.EIO) - } - // if the other end disappears, assume this is temporary and wait for the - // signal to continue again. - if hangup { - ec.writec.Wait() - continue - } - } - // unrecoverable error, break the loop and return the error - break - } - if n < len(p) && err == nil { - err = io.ErrShortWrite - } - // signal for others that we finished the write - ec.writec.Signal() - return n, err -} - -// Shutdown closes the file descriptor and signals call waiters for this fd. -// It accepts a callback which will be called with the console's fd. The -// callback typically will be used to do further cleanup such as unregister the -// console's fd from the epoll interface. -// User should call Shutdown and wait for all I/O operation to be finished -// before closing the console. -func (ec *EpollConsole) Shutdown(close func(int) error) error { - ec.readc.L.Lock() - defer ec.readc.L.Unlock() - ec.writec.L.Lock() - defer ec.writec.L.Unlock() - - ec.readc.Broadcast() - ec.writec.Broadcast() - ec.closed = true - return close(ec.sysfd) -} - -// signalRead signals that the console is readable. -func (ec *EpollConsole) signalRead() { - ec.readc.L.Lock() - ec.readc.Signal() - ec.readc.L.Unlock() -} - -// signalWrite signals that the console is writable. -func (ec *EpollConsole) signalWrite() { - ec.writec.L.Lock() - ec.writec.Signal() - ec.writec.L.Unlock() -} diff --git a/vendor/github.com/containerd/console/console_unix.go b/vendor/github.com/containerd/console/console_unix.go deleted file mode 100644 index 315f1d0c..00000000 --- a/vendor/github.com/containerd/console/console_unix.go +++ /dev/null @@ -1,158 +0,0 @@ -// +build darwin freebsd linux openbsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// NewPty creates a new pty pair -// The master is returned as the first console and a string -// with the path to the pty slave is returned as the second -func NewPty() (Console, string, error) { - f, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0) - if err != nil { - return nil, "", err - } - slave, err := ptsname(f) - if err != nil { - return nil, "", err - } - if err := unlockpt(f); err != nil { - return nil, "", err - } - m, err := newMaster(f) - if err != nil { - return nil, "", err - } - return m, slave, nil -} - -type master struct { - f File - original *unix.Termios -} - -func (m *master) Read(b []byte) (int, error) { - return m.f.Read(b) -} - -func (m *master) Write(b []byte) (int, error) { - return m.f.Write(b) -} - -func (m *master) Close() error { - return m.f.Close() -} - -func (m *master) Resize(ws WinSize) error { - return tcswinsz(m.f.Fd(), ws) -} - -func (m *master) ResizeFrom(c Console) error { - ws, err := c.Size() - if err != nil { - return err - } - return m.Resize(ws) -} - -func (m *master) Reset() error { - if m.original == nil { - return nil - } - return tcset(m.f.Fd(), m.original) -} - -func (m *master) getCurrent() (unix.Termios, error) { - var termios unix.Termios - if err := tcget(m.f.Fd(), &termios); err != nil { - return unix.Termios{}, err - } - return termios, nil -} - -func (m *master) SetRaw() error { - rawState, err := m.getCurrent() - if err != nil { - return err - } - rawState = cfmakeraw(rawState) - rawState.Oflag = rawState.Oflag | unix.OPOST - return tcset(m.f.Fd(), &rawState) -} - -func (m *master) DisableEcho() error { - rawState, err := m.getCurrent() - if err != nil { - return err - } - rawState.Lflag = rawState.Lflag &^ unix.ECHO - return tcset(m.f.Fd(), &rawState) -} - -func (m *master) Size() (WinSize, error) { - return tcgwinsz(m.f.Fd()) -} - -func (m *master) Fd() uintptr { - return m.f.Fd() -} - -func (m *master) Name() string { - return m.f.Name() -} - -// checkConsole checks if the provided file is a console -func checkConsole(f File) error { - var termios unix.Termios - if tcget(f.Fd(), &termios) != nil { - return ErrNotAConsole - } - return nil -} - -func newMaster(f File) (Console, error) { - m := &master{ - f: f, - } - t, err := m.getCurrent() - if err != nil { - return nil, err - } - m.original = &t - return m, nil -} - -// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair -// created by us acts normally. In particular, a not-very-well-known default of -// Linux unix98 ptys is that they have +onlcr by default. While this isn't a -// problem for terminal emulators, because we relay data from the terminal we -// also relay that funky line discipline. -func ClearONLCR(fd uintptr) error { - return setONLCR(fd, false) -} - -// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair -// created by us acts as intended for a terminal emulator. -func SetONLCR(fd uintptr) error { - return setONLCR(fd, true) -} diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go deleted file mode 100644 index 129a9288..00000000 --- a/vendor/github.com/containerd/console/console_windows.go +++ /dev/null @@ -1,216 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "fmt" - "os" - - "github.com/pkg/errors" - "golang.org/x/sys/windows" -) - -var ( - vtInputSupported bool - ErrNotImplemented = errors.New("not implemented") -) - -func (m *master) initStdios() { - m.in = windows.Handle(os.Stdin.Fd()) - if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil { - // Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. - if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - windows.SetConsoleMode(m.in, m.inMode) - } else { - fmt.Printf("failed to get console mode for stdin: %v\n", err) - } - - m.out = windows.Handle(os.Stdout.Fd()) - if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil { - if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { - m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING - } else { - windows.SetConsoleMode(m.out, m.outMode) - } - } else { - fmt.Printf("failed to get console mode for stdout: %v\n", err) - } - - m.err = windows.Handle(os.Stderr.Fd()) - if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil { - if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { - m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING - } else { - windows.SetConsoleMode(m.err, m.errMode) - } - } else { - fmt.Printf("failed to get console mode for stderr: %v\n", err) - } -} - -type master struct { - in windows.Handle - inMode uint32 - - out windows.Handle - outMode uint32 - - err windows.Handle - errMode uint32 -} - -func (m *master) SetRaw() error { - if err := makeInputRaw(m.in, m.inMode); err != nil { - return err - } - - // Set StdOut and StdErr to raw mode, we ignore failures since - // windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of - // Windows. 
- - windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN) - - windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN) - - return nil -} - -func (m *master) Reset() error { - for _, s := range []struct { - fd windows.Handle - mode uint32 - }{ - {m.in, m.inMode}, - {m.out, m.outMode}, - {m.err, m.errMode}, - } { - if err := windows.SetConsoleMode(s.fd, s.mode); err != nil { - return errors.Wrap(err, "unable to restore console mode") - } - } - - return nil -} - -func (m *master) Size() (WinSize, error) { - var info windows.ConsoleScreenBufferInfo - err := windows.GetConsoleScreenBufferInfo(m.out, &info) - if err != nil { - return WinSize{}, errors.Wrap(err, "unable to get console info") - } - - winsize := WinSize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -func (m *master) Resize(ws WinSize) error { - return ErrNotImplemented -} - -func (m *master) ResizeFrom(c Console) error { - return ErrNotImplemented -} - -func (m *master) DisableEcho() error { - mode := m.inMode &^ windows.ENABLE_ECHO_INPUT - mode |= windows.ENABLE_PROCESSED_INPUT - mode |= windows.ENABLE_LINE_INPUT - - if err := windows.SetConsoleMode(m.in, mode); err != nil { - return errors.Wrap(err, "unable to set console to disable echo") - } - - return nil -} - -func (m *master) Close() error { - return nil -} - -func (m *master) Read(b []byte) (int, error) { - return os.Stdin.Read(b) -} - -func (m *master) Write(b []byte) (int, error) { - return os.Stdout.Write(b) -} - -func (m *master) Fd() uintptr { - return uintptr(m.in) -} - -// on windows, console can only be made from os.Std{in,out,err}, hence there -// isnt a single name here we can use. Return a dummy "console" value in this -// case should be sufficient. 
-func (m *master) Name() string { - return "console" -} - -// makeInputRaw puts the terminal (Windows Console) connected to the given -// file descriptor into raw mode -func makeInputRaw(fd windows.Handle, mode uint32) error { - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= windows.ENABLE_ECHO_INPUT - mode &^= windows.ENABLE_LINE_INPUT - mode &^= windows.ENABLE_MOUSE_INPUT - mode &^= windows.ENABLE_WINDOW_INPUT - mode &^= windows.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= windows.ENABLE_EXTENDED_FLAGS - mode |= windows.ENABLE_INSERT_MODE - mode |= windows.ENABLE_QUICK_EDIT_MODE - - if vtInputSupported { - mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT - } - - if err := windows.SetConsoleMode(fd, mode); err != nil { - return errors.Wrap(err, "unable to set console to raw mode") - } - - return nil -} - -func checkConsole(f File) error { - var mode uint32 - if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil { - return err - } - return nil -} - -func newMaster(f File) (Console, error) { - if f != os.Stdin && f != os.Stdout && f != os.Stderr { - return nil, errors.New("creating a console from a file is not supported on windows") - } - m := &master{} - m.initStdios() - return m, nil -} diff --git a/vendor/github.com/containerd/console/go.mod b/vendor/github.com/containerd/console/go.mod deleted file mode 100644 index 97b587d6..00000000 --- a/vendor/github.com/containerd/console/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/containerd/console - -go 1.13 - -require ( - github.com/pkg/errors v0.8.1 - golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e -) diff --git a/vendor/github.com/containerd/console/go.sum b/vendor/github.com/containerd/console/go.sum deleted file mode 100644 index 25205cc9..00000000 --- a/vendor/github.com/containerd/console/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/containerd/console/tc_darwin.go b/vendor/github.com/containerd/console/tc_darwin.go deleted file mode 100644 index b0128abb..00000000 --- a/vendor/github.com/containerd/console/tc_darwin.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package console - -import ( - "fmt" - "os" - "unsafe" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -func ioctl(fd, flag, data uintptr) error { - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { - return err - } - return nil -} - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - var u int32 - return ioctl(f.Fd(), unix.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u))) -} - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME) - if err != nil { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", n), nil -} diff --git a/vendor/github.com/containerd/console/tc_freebsd.go b/vendor/github.com/containerd/console/tc_freebsd.go deleted file mode 100644 index 04583a61..00000000 --- a/vendor/github.com/containerd/console/tc_freebsd.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "fmt" - "os" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -// This does not exist on FreeBSD, it does not allocate controlling terminals on open -func unlockpt(f *os.File) error { - return nil -} - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) - if err != nil { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", n), nil -} diff --git a/vendor/github.com/containerd/console/tc_linux.go b/vendor/github.com/containerd/console/tc_linux.go deleted file mode 100644 index 1bdd68e6..00000000 --- a/vendor/github.com/containerd/console/tc_linux.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package console - -import ( - "fmt" - "os" - "unsafe" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TCGETS - cmdTcSet = unix.TCSETS -) - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - var u int32 - if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != 0 { - return err - } - return nil -} - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - var u uint32 - if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&u))); err != 0 { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", u), nil -} diff --git a/vendor/github.com/containerd/console/tc_openbsd_cgo.go b/vendor/github.com/containerd/console/tc_openbsd_cgo.go deleted file mode 100644 index f0cec06a..00000000 --- a/vendor/github.com/containerd/console/tc_openbsd_cgo.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build openbsd,cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -//#include -import "C" - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - ptspath, err := C.ptsname(C.int(f.Fd())) - if err != nil { - return "", err - } - return C.GoString(ptspath), nil -} - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - if _, err := C.grantpt(C.int(f.Fd())); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/containerd/console/tc_openbsd_nocgo.go b/vendor/github.com/containerd/console/tc_openbsd_nocgo.go deleted file mode 100644 index daccce20..00000000 --- a/vendor/github.com/containerd/console/tc_openbsd_nocgo.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build openbsd,!cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// -// Implementing the functions below requires cgo support. 
Non-cgo stubs -// versions are defined below to enable cross-compilation of source code -// that depends on these functions, but the resultant cross-compiled -// binaries cannot actually be used. If the stub function(s) below are -// actually invoked they will display an error message and cause the -// calling process to exit. -// - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -func ptsname(f *os.File) (string, error) { - panic("ptsname() support requires cgo.") -} - -func unlockpt(f *os.File) error { - panic("unlockpt() support requires cgo.") -} diff --git a/vendor/github.com/containerd/console/tc_solaris_cgo.go b/vendor/github.com/containerd/console/tc_solaris_cgo.go deleted file mode 100644 index e36a68ed..00000000 --- a/vendor/github.com/containerd/console/tc_solaris_cgo.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build solaris,cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -//#include -import "C" - -const ( - cmdTcGet = unix.TCGETS - cmdTcSet = unix.TCSETS -) - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - ptspath, err := C.ptsname(C.int(f.Fd())) - if err != nil { - return "", err - } - return C.GoString(ptspath), nil -} - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - if _, err := C.grantpt(C.int(f.Fd())); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/containerd/console/tc_solaris_nocgo.go b/vendor/github.com/containerd/console/tc_solaris_nocgo.go deleted file mode 100644 index eb0bd2c3..00000000 --- a/vendor/github.com/containerd/console/tc_solaris_nocgo.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build solaris,!cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// -// Implementing the functions below requires cgo support. Non-cgo stubs -// versions are defined below to enable cross-compilation of source code -// that depends on these functions, but the resultant cross-compiled -// binaries cannot actually be used. If the stub function(s) below are -// actually invoked they will display an error message and cause the -// calling process to exit. 
-// - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TCGETS - cmdTcSet = unix.TCSETS -) - -func ptsname(f *os.File) (string, error) { - panic("ptsname() support requires cgo.") -} - -func unlockpt(f *os.File) error { - panic("unlockpt() support requires cgo.") -} diff --git a/vendor/github.com/containerd/console/tc_unix.go b/vendor/github.com/containerd/console/tc_unix.go deleted file mode 100644 index 7ae773c5..00000000 --- a/vendor/github.com/containerd/console/tc_unix.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build darwin freebsd linux openbsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "golang.org/x/sys/unix" -) - -func tcget(fd uintptr, p *unix.Termios) error { - termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet) - if err != nil { - return err - } - *p = *termios - return nil -} - -func tcset(fd uintptr, p *unix.Termios) error { - return unix.IoctlSetTermios(int(fd), cmdTcSet, p) -} - -func tcgwinsz(fd uintptr) (WinSize, error) { - var ws WinSize - - uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) - if err != nil { - return ws, err - } - - // Translate from unix.Winsize to console.WinSize - ws.Height = uws.Row - ws.Width = uws.Col - ws.x = uws.Xpixel - ws.y = uws.Ypixel - return ws, nil -} - -func tcswinsz(fd uintptr, ws WinSize) error { - // Translate from console.WinSize to unix.Winsize - - var uws unix.Winsize - uws.Row = ws.Height - uws.Col = ws.Width - uws.Xpixel = ws.x - uws.Ypixel = ws.y - - return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws) -} - -func setONLCR(fd uintptr, enable bool) error { - var termios unix.Termios - if err := tcget(fd, &termios); err != nil { - return err - } - if enable { - // Set +onlcr so we can act like a real terminal - termios.Oflag |= unix.ONLCR - } else { - // Set -onlcr so we don't have to deal with \r. - termios.Oflag &^= unix.ONLCR - } - return tcset(fd, &termios) -} - -func cfmakeraw(t unix.Termios) unix.Termios { - t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) - t.Oflag &^= unix.OPOST - t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - t.Cflag &^= (unix.CSIZE | unix.PARENB) - t.Cflag &^= unix.CS8 - t.Cc[unix.VMIN] = 1 - t.Cc[unix.VTIME] = 0 - - return t -} diff --git a/vendor/github.com/containerd/containerd/snapshots/overlay/check.go b/vendor/github.com/containerd/containerd/snapshots/overlay/check.go deleted file mode 100644 index cec46df0..00000000 --- a/vendor/github.com/containerd/containerd/snapshots/overlay/check.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package overlay - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/mount" - "github.com/containerd/continuity/fs" - "github.com/pkg/errors" -) - -// supportsMultipleLowerDir checks if the system supports multiple lowerdirs, -// which is required for the overlay snapshotter. On 4.x kernels, multiple lowerdirs -// are always available (so this check isn't needed), and backported to RHEL and -// CentOS 3.x kernels (3.10.0-693.el7.x86_64 and up). This function is to detect -// support on those kernels, without doing a kernel version compare. -// -// Ported from moby overlay2. -func supportsMultipleLowerDir(d string) error { - td, err := ioutil.TempDir(d, "multiple-lowerdir-check") - if err != nil { - return err - } - defer func() { - if err := os.RemoveAll(td); err != nil { - log.L.WithError(err).Warnf("Failed to remove check directory %v", td) - } - }() - - for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} { - if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil { - return err - } - } - - opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work")) - m := mount.Mount{ - Type: "overlay", - Source: "overlay", - Options: []string{opts}, - } - dest := filepath.Join(td, "merged") - if err := m.Mount(dest); err != nil { - return errors.Wrap(err, "failed to mount overlay") - } - if err := mount.UnmountAll(dest, 0); err != nil { - log.L.WithError(err).Warnf("Failed to unmount check directory %v", dest) - } - return nil -} - -// Supported returns nil when the overlayfs is functional on the system with the root directory. -// Supported is not called during plugin initialization, but exposed for downstream projects which uses -// this snapshotter as a library. -func Supported(root string) error { - if err := os.MkdirAll(root, 0700); err != nil { - return err - } - supportsDType, err := fs.SupportsDType(root) - if err != nil { - return err - } - if !supportsDType { - return fmt.Errorf("%s does not support d_type. If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root) - } - return supportsMultipleLowerDir(root) -} diff --git a/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go b/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go deleted file mode 100644 index 2222207a..00000000 --- a/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go +++ /dev/null @@ -1,513 +0,0 @@ -// +build linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package overlay - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/platforms" - "github.com/containerd/containerd/plugin" - "github.com/containerd/containerd/snapshots" - "github.com/containerd/containerd/snapshots/storage" - "github.com/containerd/continuity/fs" - "github.com/pkg/errors" -) - -func init() { - plugin.Register(&plugin.Registration{ - Type: plugin.SnapshotPlugin, - ID: "overlayfs", - InitFn: func(ic *plugin.InitContext) (interface{}, error) { - ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) - ic.Meta.Exports["root"] = ic.Root - return NewSnapshotter(ic.Root, AsynchronousRemove) - }, - }) -} - -// SnapshotterConfig is used to configure the overlay snapshotter instance -type SnapshotterConfig struct { - asyncRemove bool -} - -// Opt is an option to configure the overlay snapshotter -type Opt func(config *SnapshotterConfig) error - -// AsynchronousRemove defers removal of filesystem content until -// the Cleanup method is called. Removals will make the snapshot -// referred to by the key unavailable and make the key immediately -// available for re-use. -func AsynchronousRemove(config *SnapshotterConfig) error { - config.asyncRemove = true - return nil -} - -type snapshotter struct { - root string - ms *storage.MetaStore - asyncRemove bool -} - -// NewSnapshotter returns a Snapshotter which uses overlayfs. The overlayfs -// diffs are stored under the provided root. A metadata file is stored under -// the root. -func NewSnapshotter(root string, opts ...Opt) (snapshots.Snapshotter, error) { - var config SnapshotterConfig - for _, opt := range opts { - if err := opt(&config); err != nil { - return nil, err - } - } - - if err := os.MkdirAll(root, 0700); err != nil { - return nil, err - } - supportsDType, err := fs.SupportsDType(root) - if err != nil { - return nil, err - } - if !supportsDType { - return nil, fmt.Errorf("%s does not support d_type. If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root) - } - ms, err := storage.NewMetaStore(filepath.Join(root, "metadata.db")) - if err != nil { - return nil, err - } - - if err := os.Mkdir(filepath.Join(root, "snapshots"), 0700); err != nil && !os.IsExist(err) { - return nil, err - } - - return &snapshotter{ - root: root, - ms: ms, - asyncRemove: config.asyncRemove, - }, nil -} - -// Stat returns the info for an active or committed snapshot by name or -// key. -// -// Should be used for parent resolution, existence checks and to discern -// the kind of snapshot. -func (o *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) { - ctx, t, err := o.ms.TransactionContext(ctx, false) - if err != nil { - return snapshots.Info{}, err - } - defer t.Rollback() - _, info, _, err := storage.GetInfo(ctx, key) - if err != nil { - return snapshots.Info{}, err - } - - return info, nil -} - -func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) { - ctx, t, err := o.ms.TransactionContext(ctx, true) - if err != nil { - return snapshots.Info{}, err - } - - info, err = storage.UpdateInfo(ctx, info, fieldpaths...) 
- if err != nil { - t.Rollback() - return snapshots.Info{}, err - } - - if err := t.Commit(); err != nil { - return snapshots.Info{}, err - } - - return info, nil -} - -// Usage returns the resources taken by the snapshot identified by key. -// -// For active snapshots, this will scan the usage of the overlay "diff" (aka -// "upper") directory and may take some time. -// -// For committed snapshots, the value is returned from the metadata database. -func (o *snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) { - ctx, t, err := o.ms.TransactionContext(ctx, false) - if err != nil { - return snapshots.Usage{}, err - } - id, info, usage, err := storage.GetInfo(ctx, key) - t.Rollback() // transaction no longer needed at this point. - - if err != nil { - return snapshots.Usage{}, err - } - - if info.Kind == snapshots.KindActive { - upperPath := o.upperPath(id) - du, err := fs.DiskUsage(ctx, upperPath) - if err != nil { - // TODO(stevvooe): Consider not reporting an error in this case. - return snapshots.Usage{}, err - } - - usage = snapshots.Usage(du) - } - - return usage, nil -} - -func (o *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { - return o.createSnapshot(ctx, snapshots.KindActive, key, parent, opts) -} - -func (o *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { - return o.createSnapshot(ctx, snapshots.KindView, key, parent, opts) -} - -// Mounts returns the mounts for the transaction identified by key. Can be -// called on an read-write or readonly transaction. -// -// This can be used to recover mounts after calling View or Prepare. -func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) { - ctx, t, err := o.ms.TransactionContext(ctx, false) - if err != nil { - return nil, err - } - s, err := storage.GetSnapshot(ctx, key) - t.Rollback() - if err != nil { - return nil, errors.Wrap(err, "failed to get active mount") - } - return o.mounts(s), nil -} - -func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { - ctx, t, err := o.ms.TransactionContext(ctx, true) - if err != nil { - return err - } - - defer func() { - if err != nil { - if rerr := t.Rollback(); rerr != nil { - log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") - } - } - }() - - // grab the existing id - id, _, _, err := storage.GetInfo(ctx, key) - if err != nil { - return err - } - - usage, err := fs.DiskUsage(ctx, o.upperPath(id)) - if err != nil { - return err - } - - if _, err = storage.CommitActive(ctx, key, name, snapshots.Usage(usage), opts...); err != nil { - return errors.Wrap(err, "failed to commit snapshot") - } - return t.Commit() -} - -// Remove abandons the snapshot identified by key. The snapshot will -// immediately become unavailable and unrecoverable. Disk space will -// be freed up on the next call to `Cleanup`. 
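A minimal sketch of the snapshot lifecycle this snapshotter exposes (Prepare an active snapshot, then Commit it under an immutable name); the root path and snapshot keys are hypothetical and error handling is trimmed to the essentials:

```go
package main

import (
	"context"
	"log"

	overlay "github.com/containerd/containerd/snapshots/overlay"
)

func main() {
	ctx := context.Background()

	// Diff directories and metadata.db live under this (hypothetical) root.
	sn, err := overlay.NewSnapshotter("/var/lib/example/overlay-snapshots")
	if err != nil {
		log.Fatal(err)
	}
	defer sn.Close()

	// Prepare an active snapshot with no parent; with no lower layers the
	// returned mounts are a plain bind mount of the snapshot's "fs" directory.
	mounts, err := sn.Prepare(ctx, "layer-0-active", "")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("active snapshot mounts: %+v", mounts)

	// Commit freezes the active snapshot under a new, immutable name.
	if err := sn.Commit(ctx, "layer-0", "layer-0-active"); err != nil {
		log.Fatal(err)
	}
}
```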
-func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { - ctx, t, err := o.ms.TransactionContext(ctx, true) - if err != nil { - return err - } - defer func() { - if err != nil { - if rerr := t.Rollback(); rerr != nil { - log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") - } - } - }() - - _, _, err = storage.Remove(ctx, key) - if err != nil { - return errors.Wrap(err, "failed to remove") - } - - if !o.asyncRemove { - var removals []string - removals, err = o.getCleanupDirectories(ctx, t) - if err != nil { - return errors.Wrap(err, "unable to get directories for removal") - } - - // Remove directories after the transaction is closed, failures must not - // return error since the transaction is committed with the removal - // key no longer available. - defer func() { - if err == nil { - for _, dir := range removals { - if err := os.RemoveAll(dir); err != nil { - log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory") - } - } - } - }() - - } - - return t.Commit() -} - -// Walk the snapshots. -func (o *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error { - ctx, t, err := o.ms.TransactionContext(ctx, false) - if err != nil { - return err - } - defer t.Rollback() - return storage.WalkInfo(ctx, fn, fs...) -} - -// Cleanup cleans up disk resources from removed or abandoned snapshots -func (o *snapshotter) Cleanup(ctx context.Context) error { - cleanup, err := o.cleanupDirectories(ctx) - if err != nil { - return err - } - - for _, dir := range cleanup { - if err := os.RemoveAll(dir); err != nil { - log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory") - } - } - - return nil -} - -func (o *snapshotter) cleanupDirectories(ctx context.Context) ([]string, error) { - // Get a write transaction to ensure no other write transaction can be entered - // while the cleanup is scanning. 
- ctx, t, err := o.ms.TransactionContext(ctx, true) - if err != nil { - return nil, err - } - - defer t.Rollback() - return o.getCleanupDirectories(ctx, t) -} - -func (o *snapshotter) getCleanupDirectories(ctx context.Context, t storage.Transactor) ([]string, error) { - ids, err := storage.IDMap(ctx) - if err != nil { - return nil, err - } - - snapshotDir := filepath.Join(o.root, "snapshots") - fd, err := os.Open(snapshotDir) - if err != nil { - return nil, err - } - defer fd.Close() - - dirs, err := fd.Readdirnames(0) - if err != nil { - return nil, err - } - - cleanup := []string{} - for _, d := range dirs { - if _, ok := ids[d]; ok { - continue - } - - cleanup = append(cleanup, filepath.Join(snapshotDir, d)) - } - - return cleanup, nil -} - -func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ []mount.Mount, err error) { - ctx, t, err := o.ms.TransactionContext(ctx, true) - if err != nil { - return nil, err - } - - var td, path string - defer func() { - if err != nil { - if td != "" { - if err1 := os.RemoveAll(td); err1 != nil { - log.G(ctx).WithError(err1).Warn("failed to cleanup temp snapshot directory") - } - } - if path != "" { - if err1 := os.RemoveAll(path); err1 != nil { - log.G(ctx).WithError(err1).WithField("path", path).Error("failed to reclaim snapshot directory, directory may need removal") - err = errors.Wrapf(err, "failed to remove path: %v", err1) - } - } - } - }() - - snapshotDir := filepath.Join(o.root, "snapshots") - td, err = o.prepareDirectory(ctx, snapshotDir, kind) - if err != nil { - if rerr := t.Rollback(); rerr != nil { - log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") - } - return nil, errors.Wrap(err, "failed to create prepare snapshot dir") - } - rollback := true - defer func() { - if rollback { - if rerr := t.Rollback(); rerr != nil { - log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") - } - } - }() - - s, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...) 
- if err != nil { - return nil, errors.Wrap(err, "failed to create snapshot") - } - - if len(s.ParentIDs) > 0 { - st, err := os.Stat(o.upperPath(s.ParentIDs[0])) - if err != nil { - return nil, errors.Wrap(err, "failed to stat parent") - } - - stat := st.Sys().(*syscall.Stat_t) - - if err := os.Lchown(filepath.Join(td, "fs"), int(stat.Uid), int(stat.Gid)); err != nil { - if rerr := t.Rollback(); rerr != nil { - log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") - } - return nil, errors.Wrap(err, "failed to chown") - } - } - - path = filepath.Join(snapshotDir, s.ID) - if err = os.Rename(td, path); err != nil { - return nil, errors.Wrap(err, "failed to rename") - } - td = "" - - rollback = false - if err = t.Commit(); err != nil { - return nil, errors.Wrap(err, "commit failed") - } - - return o.mounts(s), nil -} - -func (o *snapshotter) prepareDirectory(ctx context.Context, snapshotDir string, kind snapshots.Kind) (string, error) { - td, err := ioutil.TempDir(snapshotDir, "new-") - if err != nil { - return "", errors.Wrap(err, "failed to create temp dir") - } - - if err := os.Mkdir(filepath.Join(td, "fs"), 0755); err != nil { - return td, err - } - - if kind == snapshots.KindActive { - if err := os.Mkdir(filepath.Join(td, "work"), 0711); err != nil { - return td, err - } - } - - return td, nil -} - -func (o *snapshotter) mounts(s storage.Snapshot) []mount.Mount { - if len(s.ParentIDs) == 0 { - // if we only have one layer/no parents then just return a bind mount as overlay - // will not work - roFlag := "rw" - if s.Kind == snapshots.KindView { - roFlag = "ro" - } - - return []mount.Mount{ - { - Source: o.upperPath(s.ID), - Type: "bind", - Options: []string{ - roFlag, - "rbind", - }, - }, - } - } - var options []string - - if s.Kind == snapshots.KindActive { - options = append(options, - fmt.Sprintf("workdir=%s", o.workPath(s.ID)), - fmt.Sprintf("upperdir=%s", o.upperPath(s.ID)), - ) - } else if len(s.ParentIDs) == 1 { - return []mount.Mount{ - { - Source: o.upperPath(s.ParentIDs[0]), - Type: "bind", - Options: []string{ - "ro", - "rbind", - }, - }, - } - } - - parentPaths := make([]string, len(s.ParentIDs)) - for i := range s.ParentIDs { - parentPaths[i] = o.upperPath(s.ParentIDs[i]) - } - - options = append(options, fmt.Sprintf("lowerdir=%s", strings.Join(parentPaths, ":"))) - return []mount.Mount{ - { - Type: "overlay", - Source: "overlay", - Options: options, - }, - } - -} - -func (o *snapshotter) upperPath(id string) string { - return filepath.Join(o.root, "snapshots", id, "fs") -} - -func (o *snapshotter) workPath(id string) string { - return filepath.Join(o.root, "snapshots", id, "work") -} - -// Close closes the snapshotter -func (o *snapshotter) Close() error { - return o.ms.Close() -} diff --git a/vendor/github.com/containerd/go-cni/.travis.yml b/vendor/github.com/containerd/go-cni/.travis.yml deleted file mode 100644 index 48fbb6dc..00000000 --- a/vendor/github.com/containerd/go-cni/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go -go: - - 1.12.x - - tip - -go_import_path: github.com/containerd/go-cni - -install: - - go get -d - - env GO111MODULE=off go get -u github.com/vbatts/git-validation - - env GO111MODULE=off go get -u github.com/kunalkushwaha/ltag - -before_script: - - pushd ..; git clone https://github.com/containerd/project; popd - -script: - - DCO_VERBOSITY=-q ../project/script/validate/dco - - ../project/script/validate/fileheader ../project/ - - env GO111MODULE=on ../project/script/validate/vendor - - go test -race -coverprofile=coverage.txt 
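The `mounts` method above turns an active snapshot with parents into a single overlay mount whose options carry `workdir`, `upperdir`, and a colon-joined `lowerdir` (most recent parent first). A small sketch, with hypothetical snapshot paths, of the option string it assembles:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical layout: <root>/snapshots/<id>/fs and .../work, matching
	// what upperPath and workPath return in the snapshotter above.
	upper := "/var/lib/example/overlay-snapshots/snapshots/3/fs"
	work := "/var/lib/example/overlay-snapshots/snapshots/3/work"
	lowers := []string{
		"/var/lib/example/overlay-snapshots/snapshots/2/fs", // newest parent first
		"/var/lib/example/overlay-snapshots/snapshots/1/fs",
	}

	options := []string{
		fmt.Sprintf("workdir=%s", work),
		fmt.Sprintf("upperdir=%s", upper),
		fmt.Sprintf("lowerdir=%s", strings.Join(lowers, ":")),
	}
	// Equivalent of: mount -t overlay overlay -o <options> <merged dir>
	fmt.Println(strings.Join(options, ","))
}
```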
-covermode=atomic - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/containerd/go-cni/LICENSE b/vendor/github.com/containerd/go-cni/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/github.com/containerd/go-cni/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/go-cni/README.md b/vendor/github.com/containerd/go-cni/README.md deleted file mode 100644 index 3b1a4aa7..00000000 --- a/vendor/github.com/containerd/go-cni/README.md +++ /dev/null @@ -1,60 +0,0 @@ -[![Build Status](https://travis-ci.org/containerd/go-cni.svg?branch=master)](https://travis-ci.org/containerd/go-cni) [![GoDoc](https://godoc.org/github.com/containerd/go-cni?status.svg)](https://godoc.org/github.com/containerd/go-cni) - -# go-cni - -A generic CNI library to provide APIs for CNI plugin interactions. The library provides APIs to: - -- Load CNI network config from different sources -- Setup networks for container namespace -- Remove networks from container namespace -- Query status of CNI network plugin initialization - -go-cni aims to support plugins that implement [Container Network Interface](https://github.com/containernetworking/cni) - -## Usage -```go -func main() { - id := "123456" - netns := "/proc/9999/ns/net" - defaultIfName := "eth0" - // Initialize library - l = gocni.New(gocni.WithMinNetworkCount(2), - gocni.WithPluginConfDir("/etc/mycni/net.d"), - gocni.WithPluginDir([]string{"/opt/mycni/bin", "/opt/cni/bin"}), - gocni.WithDefaultIfName(defaultIfName)) - - // Load the cni configuration - err:= l.Load(gocni.WithLoNetwork, gocni.WithDefaultConf) - if err != nil{ - log.Errorf("failed to load cni configuration: %v", err) - return - } - - // Setup network for namespace. - labels := map[string]string{ - "K8S_POD_NAMESPACE": "namespace1", - "K8S_POD_NAME": "pod1", - "K8S_POD_INFRA_CONTAINER_ID": id, - } - result, err := l.Setup(id, netns, gocni.WithLabels(labels)) - if err != nil { - log.Errorf("failed to setup network for namespace %q: %v",id, err) - return - } - - // Get IP of the default interface - IP := result.Interfaces[defaultIfName].IPConfigs[0].IP.String() - fmt.Printf("IP of the default interface %s:%s", defaultIfName, IP) -} -``` - -## Project details - -The go-cni is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/go-cni/cni.go b/vendor/github.com/containerd/go-cni/cni.go deleted file mode 100644 index 8acc83b3..00000000 --- a/vendor/github.com/containerd/go-cni/cni.go +++ /dev/null @@ -1,220 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cni - -import ( - "context" - "fmt" - "strings" - "sync" - - cnilibrary "github.com/containernetworking/cni/libcni" - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/types/current" - "github.com/pkg/errors" -) - -type CNI interface { - // Setup setup the network for the namespace - Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*CNIResult, error) - // Remove tears down the network of the namespace. - Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error - // Load loads the cni network config - Load(opts ...CNIOpt) error - // Status checks the status of the cni initialization - Status() error - // GetConfig returns a copy of the CNI plugin configurations as parsed by CNI - GetConfig() *ConfigResult -} - -type ConfigResult struct { - PluginDirs []string - PluginConfDir string - PluginMaxConfNum int - Prefix string - Networks []*ConfNetwork -} - -type ConfNetwork struct { - Config *NetworkConfList - IFName string -} - -// NetworkConfList is a source bytes to string version of cnilibrary.NetworkConfigList -type NetworkConfList struct { - Name string - CNIVersion string - Plugins []*NetworkConf - Source string -} - -// NetworkConf is a source bytes to string conversion of cnilibrary.NetworkConfig -type NetworkConf struct { - Network *types.NetConf - Source string -} - -type libcni struct { - config - - cniConfig cnilibrary.CNI - networkCount int // minimum network plugin configurations needed to initialize cni - networks []*Network - sync.RWMutex -} - -func defaultCNIConfig() *libcni { - return &libcni{ - config: config{ - pluginDirs: []string{DefaultCNIDir}, - pluginConfDir: DefaultNetDir, - pluginMaxConfNum: DefaultMaxConfNum, - prefix: DefaultPrefix, - }, - cniConfig: &cnilibrary.CNIConfig{ - Path: []string{DefaultCNIDir}, - }, - networkCount: 1, - } -} - -// New creates a new libcni instance. -func New(config ...CNIOpt) (CNI, error) { - cni := defaultCNIConfig() - var err error - for _, c := range config { - if err = c(cni); err != nil { - return nil, err - } - } - return cni, nil -} - -// Load loads the latest config from cni config files. -func (c *libcni) Load(opts ...CNIOpt) error { - var err error - c.Lock() - defer c.Unlock() - // Reset the networks on a load operation to ensure - // config happens on a clean slate - c.reset() - - for _, o := range opts { - if err = o(c); err != nil { - return errors.Wrapf(ErrLoad, fmt.Sprintf("cni config load failed: %v", err)) - } - } - return nil -} - -// Status returns the status of CNI initialization. -func (c *libcni) Status() error { - c.RLock() - defer c.RUnlock() - if len(c.networks) < c.networkCount { - return ErrCNINotInitialized - } - return nil -} - -// Networks returns all the configured networks. -// NOTE: Caller MUST NOT modify anything in the returned array. -func (c *libcni) Networks() []*Network { - c.RLock() - defer c.RUnlock() - return append([]*Network{}, c.networks...) 
-} - -// Setup setups the network in the namespace -func (c *libcni) Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*CNIResult, error) { - if err := c.Status(); err != nil { - return nil, err - } - ns, err := newNamespace(id, path, opts...) - if err != nil { - return nil, err - } - var results []*current.Result - for _, network := range c.Networks() { - r, err := network.Attach(ctx, ns) - if err != nil { - return nil, err - } - results = append(results, r) - } - return c.GetCNIResultFromResults(results) -} - -// Remove removes the network config from the namespace -func (c *libcni) Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error { - if err := c.Status(); err != nil { - return err - } - ns, err := newNamespace(id, path, opts...) - if err != nil { - return err - } - for _, network := range c.Networks() { - if err := network.Remove(ctx, ns); err != nil { - // Based on CNI spec v0.7.0, empty network namespace is allowed to - // do best effort cleanup. However, it is not handled consistently - // right now: - // https://github.com/containernetworking/plugins/issues/210 - // TODO(random-liu): Remove the error handling when the issue is - // fixed and the CNI spec v0.6.0 support is deprecated. - if path == "" && strings.Contains(err.Error(), "no such file or directory") { - continue - } - return err - } - } - return nil -} - -// GetConfig returns a copy of the CNI plugin configurations as parsed by CNI -func (c *libcni) GetConfig() *ConfigResult { - c.RLock() - defer c.RUnlock() - r := &ConfigResult{ - PluginDirs: c.config.pluginDirs, - PluginConfDir: c.config.pluginConfDir, - PluginMaxConfNum: c.config.pluginMaxConfNum, - Prefix: c.config.prefix, - } - for _, network := range c.networks { - conf := &NetworkConfList{ - Name: network.config.Name, - CNIVersion: network.config.CNIVersion, - Source: string(network.config.Bytes), - } - for _, plugin := range network.config.Plugins { - conf.Plugins = append(conf.Plugins, &NetworkConf{ - Network: plugin.Network, - Source: string(plugin.Bytes), - }) - } - r.Networks = append(r.Networks, &ConfNetwork{ - Config: conf, - IFName: network.ifName, - }) - } - return r -} - -func (c *libcni) reset() { - c.networks = nil -} diff --git a/vendor/github.com/containerd/go-cni/errors.go b/vendor/github.com/containerd/go-cni/errors.go deleted file mode 100644 index 28761711..00000000 --- a/vendor/github.com/containerd/go-cni/errors.go +++ /dev/null @@ -1,55 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cni - -import ( - "github.com/pkg/errors" -) - -var ( - ErrCNINotInitialized = errors.New("cni plugin not initialized") - ErrInvalidConfig = errors.New("invalid cni config") - ErrNotFound = errors.New("not found") - ErrRead = errors.New("failed to read config file") - ErrInvalidResult = errors.New("invalid result") - ErrLoad = errors.New("failed to load cni config") -) - -// IsCNINotInitialized returns true if the error is due to cni config not being initialized -func IsCNINotInitialized(err error) bool { - return errors.Cause(err) == ErrCNINotInitialized -} - -// IsInvalidConfig returns true if the error is invalid cni config -func IsInvalidConfig(err error) bool { - return errors.Cause(err) == ErrInvalidConfig -} - -// IsNotFound returns true if the error is due to a missing config or result -func IsNotFound(err error) bool { - return errors.Cause(err) == ErrNotFound -} - -// IsReadFailure return true if the error is a config read failure -func IsReadFailure(err error) bool { - return errors.Cause(err) == ErrRead -} - -// IsInvalidResult return true if the error is due to invalid cni result -func IsInvalidResult(err error) bool { - return errors.Cause(err) == ErrInvalidResult -} diff --git a/vendor/github.com/containerd/go-cni/go.mod b/vendor/github.com/containerd/go-cni/go.mod deleted file mode 100644 index f3165f4a..00000000 --- a/vendor/github.com/containerd/go-cni/go.mod +++ /dev/null @@ -1,14 +0,0 @@ -module github.com/containerd/go-cni - -require ( - github.com/containernetworking/cni v0.7.1 - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/onsi/ginkgo v1.10.3 // indirect - github.com/onsi/gomega v1.7.1 // indirect - github.com/pkg/errors v0.8.0 - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f // indirect - github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d -) - -go 1.12 diff --git a/vendor/github.com/containerd/go-cni/go.sum b/vendor/github.com/containerd/go-cni/go.sum deleted file mode 100644 index a0890f99..00000000 --- a/vendor/github.com/containerd/go-cni/go.sum +++ /dev/null @@ -1,39 +0,0 @@ -github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= -github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f h1:SrOsK2rwonEK9IsdNEU61zcTdKW68/PuV9wuHHpqngk= -github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d h1:YCdGqZILKLGzbyEYbdau30JBEXbKaKYmkBDU5JUW3D0= -github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/containerd/go-cni/helper.go b/vendor/github.com/containerd/go-cni/helper.go deleted file mode 100644 index 088cb9bc..00000000 --- a/vendor/github.com/containerd/go-cni/helper.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cni - -import ( - "fmt" - - "github.com/containernetworking/cni/pkg/types/current" -) - -func validateInterfaceConfig(ipConf *current.IPConfig, ifs int) error { - if ipConf == nil { - return fmt.Errorf("invalid IP configuration (nil)") - } - if ipConf.Interface != nil && *ipConf.Interface > ifs { - return fmt.Errorf("invalid IP configuration (interface number %d is > number of interfaces %d)", *ipConf.Interface, ifs) - } - return nil -} - -func getIfName(prefix string, i int) string { - return fmt.Sprintf("%s%d", prefix, i) -} - -func defaultInterface(prefix string) string { - return getIfName(prefix, 0) -} diff --git a/vendor/github.com/containerd/go-cni/namespace.go b/vendor/github.com/containerd/go-cni/namespace.go deleted file mode 100644 index ff14b01c..00000000 --- a/vendor/github.com/containerd/go-cni/namespace.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cni - -import ( - "context" - - cnilibrary "github.com/containernetworking/cni/libcni" - "github.com/containernetworking/cni/pkg/types/current" -) - -type Network struct { - cni cnilibrary.CNI - config *cnilibrary.NetworkConfigList - ifName string -} - -func (n *Network) Attach(ctx context.Context, ns *Namespace) (*current.Result, error) { - r, err := n.cni.AddNetworkList(ctx, n.config, ns.config(n.ifName)) - if err != nil { - return nil, err - } - return current.NewResultFromResult(r) -} - -func (n *Network) Remove(ctx context.Context, ns *Namespace) error { - return n.cni.DelNetworkList(ctx, n.config, ns.config(n.ifName)) -} - -type Namespace struct { - id string - path string - capabilityArgs map[string]interface{} - args map[string]string -} - -func newNamespace(id, path string, opts ...NamespaceOpts) (*Namespace, error) { - ns := &Namespace{ - id: id, - path: path, - capabilityArgs: make(map[string]interface{}), - args: make(map[string]string), - } - for _, o := range opts { - if err := o(ns); err != nil { - return nil, err - } - } - return ns, nil -} - -func (ns *Namespace) config(ifName string) *cnilibrary.RuntimeConf { - c := &cnilibrary.RuntimeConf{ - ContainerID: ns.id, - NetNS: ns.path, - IfName: ifName, - } - for k, v := range ns.args { - c.Args = append(c.Args, [2]string{k, v}) - } - c.CapabilityArgs = ns.capabilityArgs - return c -} diff --git a/vendor/github.com/containerd/go-cni/namespace_opts.go b/vendor/github.com/containerd/go-cni/namespace_opts.go deleted file mode 100644 index 1fad5f69..00000000 --- a/vendor/github.com/containerd/go-cni/namespace_opts.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cni - -type NamespaceOpts func(s *Namespace) error - -// Capabilities -func WithCapabilityPortMap(portMapping []PortMapping) NamespaceOpts { - return func(c *Namespace) error { - c.capabilityArgs["portMappings"] = portMapping - return nil - } -} - -func WithCapabilityIPRanges(ipRanges []IPRanges) NamespaceOpts { - return func(c *Namespace) error { - c.capabilityArgs["ipRanges"] = ipRanges - return nil - } -} - -// WithCapabilityBandWitdh adds support for traffic shaping: -// https://github.com/heptio/cni-plugins/tree/master/plugins/meta/bandwidth -func WithCapabilityBandWidth(bandWidth BandWidth) NamespaceOpts { - return func(c *Namespace) error { - c.capabilityArgs["bandwidth"] = bandWidth - return nil - } -} - -// WithCapabilityDNS adds support for dns -func WithCapabilityDNS(dns DNS) NamespaceOpts { - return func(c *Namespace) error { - c.capabilityArgs["dns"] = dns - return nil - } -} - -func WithCapability(name string, capability interface{}) NamespaceOpts { - return func(c *Namespace) error { - c.capabilityArgs[name] = capability - return nil - } -} - -// Args -func WithLabels(labels map[string]string) NamespaceOpts { - return func(c *Namespace) error { - for k, v := range labels { - c.args[k] = v - } - return nil - } -} - -func WithArgs(k, v string) NamespaceOpts { - return func(c *Namespace) error { - c.args[k] = v - return nil - } -} diff --git a/vendor/github.com/containerd/go-cni/opts.go b/vendor/github.com/containerd/go-cni/opts.go deleted file mode 100644 index 1dd7869a..00000000 --- a/vendor/github.com/containerd/go-cni/opts.go +++ /dev/null @@ -1,263 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cni - -import ( - "sort" - "strings" - - cnilibrary "github.com/containernetworking/cni/libcni" - "github.com/pkg/errors" -) - -type CNIOpt func(c *libcni) error - -// WithInterfacePrefix sets the prefix for network interfaces -// e.g. eth or wlan -func WithInterfacePrefix(prefix string) CNIOpt { - return func(c *libcni) error { - c.prefix = prefix - return nil - } -} - -// WithPluginDir can be used to set the locations of -// the cni plugin binaries -func WithPluginDir(dirs []string) CNIOpt { - return func(c *libcni) error { - c.pluginDirs = dirs - c.cniConfig = &cnilibrary.CNIConfig{Path: dirs} - return nil - } -} - -// WithPluginConfDir can be used to configure the -// cni configuration directory. -func WithPluginConfDir(dir string) CNIOpt { - return func(c *libcni) error { - c.pluginConfDir = dir - return nil - } -} - -// WithPluginMaxConfNum can be used to configure the -// max cni plugin config file num. 
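The namespace options above feed per-attachment data to the plugins: `WithLabels`/`WithArgs` populate the CNI args, and the `WithCapability*` helpers fill the capability args (port mappings, IP ranges, bandwidth, DNS). A compilable sketch, assuming a `gocni.CNI` instance that has already been created and loaded elsewhere:

```go
package example

import (
	"context"

	gocni "github.com/containerd/go-cni"
)

// setupWithOpts assumes l was built with gocni.New and already had its network
// configuration loaded via l.Load; id and netns are caller-supplied values.
func setupWithOpts(ctx context.Context, l gocni.CNI, id, netns string) (*gocni.CNIResult, error) {
	// A hypothetical port mapping for plugins advertising the "portMappings"
	// capability, plus plain CNI args passed as labels.
	portMappings := []gocni.PortMapping{{
		HostPort:      8080,
		ContainerPort: 80,
		Protocol:      "tcp",
	}}
	labels := map[string]string{
		"IgnoreUnknown": "1",
		"K8S_POD_NAME":  "example-pod",
	}

	return l.Setup(ctx, id, netns,
		gocni.WithCapabilityPortMap(portMappings),
		gocni.WithLabels(labels),
	)
}
```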
-func WithPluginMaxConfNum(max int) CNIOpt { - return func(c *libcni) error { - c.pluginMaxConfNum = max - return nil - } -} - -// WithMinNetworkCount can be used to configure the -// minimum networks to be configured and initialized -// for the status to report success. By default its 1. -func WithMinNetworkCount(count int) CNIOpt { - return func(c *libcni) error { - c.networkCount = count - return nil - } -} - -// WithLoNetwork can be used to load the loopback -// network config. -func WithLoNetwork(c *libcni) error { - loConfig, _ := cnilibrary.ConfListFromBytes([]byte(`{ -"cniVersion": "0.3.1", -"name": "cni-loopback", -"plugins": [{ - "type": "loopback" -}] -}`)) - - c.networks = append(c.networks, &Network{ - cni: c.cniConfig, - config: loConfig, - ifName: "lo", - }) - return nil -} - -// WithConf can be used to load config directly -// from byte. -func WithConf(bytes []byte) CNIOpt { - return WithConfIndex(bytes, 0) -} - -// WithConfIndex can be used to load config directly -// from byte and set the interface name's index. -func WithConfIndex(bytes []byte, index int) CNIOpt { - return func(c *libcni) error { - conf, err := cnilibrary.ConfFromBytes(bytes) - if err != nil { - return err - } - confList, err := cnilibrary.ConfListFromConf(conf) - if err != nil { - return err - } - c.networks = append(c.networks, &Network{ - cni: c.cniConfig, - config: confList, - ifName: getIfName(c.prefix, index), - }) - return nil - } -} - -// WithConfFile can be used to load network config -// from an .conf file. Supported with absolute fileName -// with path only. -func WithConfFile(fileName string) CNIOpt { - return func(c *libcni) error { - conf, err := cnilibrary.ConfFromFile(fileName) - if err != nil { - return err - } - // upconvert to conf list - confList, err := cnilibrary.ConfListFromConf(conf) - if err != nil { - return err - } - c.networks = append(c.networks, &Network{ - cni: c.cniConfig, - config: confList, - ifName: getIfName(c.prefix, 0), - }) - return nil - } -} - -// WithConfListBytes can be used to load network config list directly -// from byte -func WithConfListBytes(bytes []byte) CNIOpt { - return func(c *libcni) error { - confList, err := cnilibrary.ConfListFromBytes(bytes) - if err != nil { - return err - } - i := len(c.networks) - c.networks = append(c.networks, &Network{ - cni: c.cniConfig, - config: confList, - ifName: getIfName(c.prefix, i), - }) - return nil - } -} - -// WithConfListFile can be used to load network config -// from an .conflist file. Supported with absolute fileName -// with path only. -func WithConfListFile(fileName string) CNIOpt { - return func(c *libcni) error { - confList, err := cnilibrary.ConfListFromFile(fileName) - if err != nil { - return err - } - i := len(c.networks) - c.networks = append(c.networks, &Network{ - cni: c.cniConfig, - config: confList, - ifName: getIfName(c.prefix, i), - }) - return nil - } -} - -// WithDefaultConf can be used to detect the default network -// config file from the configured cni config directory and load -// it. -// Since the CNI spec does not specify a way to detect default networks, -// the convention chosen is - the first network configuration in the sorted -// list of network conf files as the default network. -func WithDefaultConf(c *libcni) error { - return loadFromConfDir(c, c.pluginMaxConfNum) -} - -// WithAllConf can be used to detect all network config -// files from the configured cni config directory and load -// them. 
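Besides `WithDefaultConf`/`WithAllConf`, configuration can also be injected directly as bytes. A sketch, with a hypothetical inline conflist, of loading the loopback network plus one bridge network and checking readiness:

```go
package main

import (
	"log"

	gocni "github.com/containerd/go-cni"
)

func main() {
	// Require two networks (loopback + the inline one) before Status reports ready.
	l, err := gocni.New(gocni.WithMinNetworkCount(2))
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical inline .conflist; it is appended as the next network, with
	// an interface name derived from the configured prefix and its index.
	conflist := []byte(`{
  "cniVersion": "0.3.1",
  "name": "example-bridge",
  "plugins": [{
    "type": "bridge",
    "bridge": "cni0",
    "ipam": { "type": "host-local", "subnet": "10.88.0.0/16" }
  }]
}`)

	if err := l.Load(gocni.WithLoNetwork, gocni.WithConfListBytes(conflist)); err != nil {
		log.Fatalf("failed to load cni configuration: %v", err)
	}
	if err := l.Status(); err != nil {
		log.Fatalf("cni not ready: %v", err)
	}
	log.Println("cni configuration loaded")
}
```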
-func WithAllConf(c *libcni) error { - return loadFromConfDir(c, 0) -} - -// loadFromConfDir detects network config files from the -// configured cni config directory and load them. max is -// the maximum network config to load (max i<= 0 means no limit). -func loadFromConfDir(c *libcni, max int) error { - files, err := cnilibrary.ConfFiles(c.pluginConfDir, []string{".conf", ".conflist", ".json"}) - switch { - case err != nil: - return errors.Wrapf(ErrRead, "failed to read config file: %v", err) - case len(files) == 0: - return errors.Wrapf(ErrCNINotInitialized, "no network config found in %s", c.pluginConfDir) - } - - // files contains the network config files associated with cni network. - // Use lexicographical way as a defined order for network config files. - sort.Strings(files) - // Since the CNI spec does not specify a way to detect default networks, - // the convention chosen is - the first network configuration in the sorted - // list of network conf files as the default network and choose the default - // interface provided during init as the network interface for this default - // network. For every other network use a generated interface id. - i := 0 - var networks []*Network - for _, confFile := range files { - var confList *cnilibrary.NetworkConfigList - if strings.HasSuffix(confFile, ".conflist") { - confList, err = cnilibrary.ConfListFromFile(confFile) - if err != nil { - return errors.Wrapf(ErrInvalidConfig, "failed to load CNI config list file %s: %v", confFile, err) - } - } else { - conf, err := cnilibrary.ConfFromFile(confFile) - if err != nil { - return errors.Wrapf(ErrInvalidConfig, "failed to load CNI config file %s: %v", confFile, err) - } - // Ensure the config has a "type" so we know what plugin to run. - // Also catches the case where somebody put a conflist into a conf file. - if conf.Network.Type == "" { - return errors.Wrapf(ErrInvalidConfig, "network type not found in %s", confFile) - } - - confList, err = cnilibrary.ConfListFromConf(conf) - if err != nil { - return errors.Wrapf(ErrInvalidConfig, "failed to convert CNI config file %s to CNI config list: %v", confFile, err) - } - } - if len(confList.Plugins) == 0 { - return errors.Wrapf(ErrInvalidConfig, "CNI config list in config file %s has no networks, skipping", confFile) - - } - networks = append(networks, &Network{ - cni: c.cniConfig, - config: confList, - ifName: getIfName(c.prefix, i), - }) - i++ - if i == max { - break - } - } - if len(networks) == 0 { - return errors.Wrapf(ErrCNINotInitialized, "no valid networks found in %s", c.pluginDirs) - } - c.networks = append(c.networks, networks...) - return nil -} diff --git a/vendor/github.com/containerd/go-cni/result.go b/vendor/github.com/containerd/go-cni/result.go deleted file mode 100644 index c2ac9486..00000000 --- a/vendor/github.com/containerd/go-cni/result.go +++ /dev/null @@ -1,106 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cni - -import ( - "net" - - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/types/current" - "github.com/pkg/errors" -) - -type IPConfig struct { - IP net.IP - Gateway net.IP -} - -type CNIResult struct { - Interfaces map[string]*Config - DNS []types.DNS - Routes []*types.Route -} - -type Config struct { - IPConfigs []*IPConfig - Mac string - Sandbox string -} - -// GetCNIResultFromResults returns a structured data containing the -// interface configuration for each of the interfaces created in the namespace. -// Conforms with -// Result: -// a) Interfaces list. Depending on the plugin, this can include the sandbox -// (eg, container or hypervisor) interface name and/or the host interface -// name, the hardware addresses of each interface, and details about the -// sandbox (if any) the interface is in. -// b) IP configuration assigned to each interface. The IPv4 and/or IPv6 addresses, -// gateways, and routes assigned to sandbox and/or host interfaces. -// c) DNS information. Dictionary that includes DNS information for nameservers, -// domain, search domains and options. -func (c *libcni) GetCNIResultFromResults(results []*current.Result) (*CNIResult, error) { - c.RLock() - defer c.RUnlock() - - r := &CNIResult{ - Interfaces: make(map[string]*Config), - } - - // Plugins may not need to return Interfaces in result if - // if there are no multiple interfaces created. In that case - // all configs should be applied against default interface - r.Interfaces[defaultInterface(c.prefix)] = &Config{} - - // Walk through all the results - for _, result := range results { - // Walk through all the interface in each result - for _, intf := range result.Interfaces { - r.Interfaces[intf.Name] = &Config{ - Mac: intf.Mac, - Sandbox: intf.Sandbox, - } - } - // Walk through all the IPs in the result and attach it to corresponding - // interfaces - for _, ipConf := range result.IPs { - if err := validateInterfaceConfig(ipConf, len(result.Interfaces)); err != nil { - return nil, errors.Wrapf(ErrInvalidResult, "invalid interface config: %v", err) - } - name := c.getInterfaceName(result.Interfaces, ipConf) - r.Interfaces[name].IPConfigs = append(r.Interfaces[name].IPConfigs, - &IPConfig{IP: ipConf.Address.IP, Gateway: ipConf.Gateway}) - } - r.DNS = append(r.DNS, result.DNS) - r.Routes = append(r.Routes, result.Routes...) - } - if _, ok := r.Interfaces[defaultInterface(c.prefix)]; !ok { - return nil, errors.Wrapf(ErrNotFound, "default network not found for: %s", defaultInterface(c.prefix)) - } - return r, nil -} - -// getInterfaceName returns the interface name if the plugins -// return the result with associated interfaces. If interface -// is not present then default interface name is used -func (c *libcni) getInterfaceName(interfaces []*current.Interface, - ipConf *current.IPConfig) string { - if ipConf.Interface != nil { - return interfaces[*ipConf.Interface].Name - } - return defaultInterface(c.prefix) -} diff --git a/vendor/github.com/containerd/go-cni/testutils.go b/vendor/github.com/containerd/go-cni/testutils.go deleted file mode 100644 index d9453c8d..00000000 --- a/vendor/github.com/containerd/go-cni/testutils.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
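`GetCNIResultFromResults` folds the per-network results into a single `CNIResult` keyed by interface name, attaching addresses either to the interface reported by the plugin or to the default one (prefix plus index 0, i.e. "eth0" with the defaults). A sketch, assuming a hypothetical container ID, netns path, and an existing default network config on the host:

```go
package main

import (
	"context"
	"fmt"
	"log"

	gocni "github.com/containerd/go-cni"
)

func main() {
	l, err := gocni.New()
	if err != nil {
		log.Fatal(err)
	}
	if err := l.Load(gocni.WithLoNetwork, gocni.WithDefaultConf); err != nil {
		log.Fatalf("failed to load cni configuration: %v", err)
	}

	// Hypothetical container ID and network namespace path.
	result, err := l.Setup(context.Background(), "example-id", "/proc/1234/ns/net")
	if err != nil {
		log.Fatalf("failed to set up networking: %v", err)
	}

	// With the default "eth" prefix, the first non-loopback attachment is
	// reported under "eth0"; its addresses land in that entry's IPConfigs.
	for _, ipc := range result.Interfaces["eth0"].IPConfigs {
		fmt.Printf("eth0 address: %s (gateway %s)\n", ipc.IP, ipc.Gateway)
	}
}
```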
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cni - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "testing" -) - -func makeTmpDir(prefix string) (string, error) { - tmpDir, err := ioutil.TempDir(os.TempDir(), prefix) - if err != nil { - return "", err - } - return tmpDir, nil -} - -func makeFakeCNIConfig(t *testing.T) (string, string) { - cniDir, err := makeTmpDir("fakecni") - if err != nil { - t.Fatalf("Failed to create plugin config dir: %v", err) - } - - cniConfDir := path.Join(cniDir, "net.d") - err = os.MkdirAll(cniConfDir, 0777) - if err != nil { - t.Fatalf("Failed to create network config dir: %v", err) - } - - networkConfig1 := path.Join(cniConfDir, "mocknetwork1.conf") - f1, err := os.Create(networkConfig1) - if err != nil { - t.Fatalf("Failed to create network config %v: %v", f1, err) - } - networkConfig2 := path.Join(cniConfDir, "mocknetwork2.conf") - f2, err := os.Create(networkConfig2) - if err != nil { - t.Fatalf("Failed to create network config %v: %v", f2, err) - } - - cfg1 := fmt.Sprintf(`{ "name": "%s", "type": "%s", "capabilities": {"portMappings": true} }`, "plugin1", "fakecni") - _, err = f1.WriteString(cfg1) - if err != nil { - t.Fatalf("Failed to write network config file %v: %v", f1, err) - } - f1.Close() - cfg2 := fmt.Sprintf(`{ "name": "%s", "type": "%s", "capabilities": {"portMappings": true} }`, "plugin2", "fakecni") - _, err = f2.WriteString(cfg2) - if err != nil { - t.Fatalf("Failed to write network config file %v: %v", f2, err) - } - f2.Close() - return cniDir, cniConfDir -} - -func tearDownCNIConfig(t *testing.T, confDir string) { - err := os.RemoveAll(confDir) - if err != nil { - t.Fatalf("Failed to cleanup CNI configs: %v", err) - } -} diff --git a/vendor/github.com/containerd/go-cni/types.go b/vendor/github.com/containerd/go-cni/types.go deleted file mode 100644 index 0b7db1ee..00000000 --- a/vendor/github.com/containerd/go-cni/types.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cni - -const ( - CNIPluginName = "cni" - DefaultNetDir = "/etc/cni/net.d" - DefaultCNIDir = "/opt/cni/bin" - DefaultMaxConfNum = 1 - VendorCNIDirTemplate = "%s/opt/%s/bin" - DefaultPrefix = "eth" -) - -type config struct { - pluginDirs []string - pluginConfDir string - pluginMaxConfNum int - prefix string -} - -type PortMapping struct { - HostPort int32 - ContainerPort int32 - Protocol string - HostIP string -} - -type IPRanges struct { - Subnet string - RangeStart string - RangeEnd string - Gateway string -} - -// BandWidth defines the ingress/egress rate and burst limits -type BandWidth struct { - IngressRate uint64 - IngressBurst uint64 - EgressRate uint64 - EgressBurst uint64 -} - -// DNS defines the dns config -type DNS struct { - // List of DNS servers of the cluster. - Servers []string - // List of DNS search domains of the cluster. - Searches []string - // List of DNS options. - Options []string -} diff --git a/vendor/github.com/containerd/go-runc/.travis.yml b/vendor/github.com/containerd/go-runc/.travis.yml deleted file mode 100644 index dd60e9ba..00000000 --- a/vendor/github.com/containerd/go-runc/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: go -go: - - 1.12.x - - 1.13.x - -install: - - go get -t ./... - - go get -u github.com/vbatts/git-validation - - go get -u github.com/kunalkushwaha/ltag - -before_script: - - pushd ..; git clone https://github.com/containerd/project; popd - -script: - - DCO_VERBOSITY=-q ../project/script/validate/dco - - ../project/script/validate/fileheader ../project/ - - go test -v -race -covermode=atomic -coverprofile=coverage.txt ./... - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/containerd/go-runc/LICENSE b/vendor/github.com/containerd/go-runc/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/github.com/containerd/go-runc/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/go-runc/README.md b/vendor/github.com/containerd/go-runc/README.md deleted file mode 100644 index c899bdd7..00000000 --- a/vendor/github.com/containerd/go-runc/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# go-runc - -[![Build Status](https://travis-ci.org/containerd/go-runc.svg?branch=master)](https://travis-ci.org/containerd/go-runc) -[![codecov](https://codecov.io/gh/containerd/go-runc/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/go-runc) - -This is a package for consuming the [runc](https://github.com/opencontainers/runc) binary in your Go applications. -It tries to expose all the settings and features of the runc CLI. If there is something missing then add it, its opensource! - -This needs runc @ [a9610f2c0](https://github.com/opencontainers/runc/commit/a9610f2c0237d2636d05a031ec8659a70e75ffeb) -or greater. - -## Docs - -Docs can be found at [godoc.org](https://godoc.org/github.com/containerd/go-runc). 
- -## Project details - -The go-runc is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/go-runc/command_linux.go b/vendor/github.com/containerd/go-runc/command_linux.go deleted file mode 100644 index 8a30f679..00000000 --- a/vendor/github.com/containerd/go-runc/command_linux.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -import ( - "context" - "os" - "os/exec" - "strings" - "syscall" -) - -func (r *Runc) command(context context.Context, args ...string) *exec.Cmd { - command := r.Command - if command == "" { - command = DefaultCommand - } - cmd := exec.CommandContext(context, command, append(r.args(), args...)...) - cmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: r.Setpgid, - } - cmd.Env = filterEnv(os.Environ(), "NOTIFY_SOCKET") // NOTIFY_SOCKET introduces a special behavior in runc but should only be set if invoked from systemd - if r.PdeathSignal != 0 { - cmd.SysProcAttr.Pdeathsig = r.PdeathSignal - } - - return cmd -} - -func filterEnv(in []string, names ...string) []string { - out := make([]string, 0, len(in)) -loop0: - for _, v := range in { - for _, k := range names { - if strings.HasPrefix(v, k+"=") { - continue loop0 - } - } - out = append(out, v) - } - return out -} diff --git a/vendor/github.com/containerd/go-runc/command_other.go b/vendor/github.com/containerd/go-runc/command_other.go deleted file mode 100644 index b8fd4b86..00000000 --- a/vendor/github.com/containerd/go-runc/command_other.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build !linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -import ( - "context" - "os" - "os/exec" -) - -func (r *Runc) command(context context.Context, args ...string) *exec.Cmd { - command := r.Command - if command == "" { - command = DefaultCommand - } - cmd := exec.CommandContext(context, command, append(r.args(), args...)...) 
- cmd.Env = os.Environ() - return cmd -} diff --git a/vendor/github.com/containerd/go-runc/console.go b/vendor/github.com/containerd/go-runc/console.go deleted file mode 100644 index ff223e42..00000000 --- a/vendor/github.com/containerd/go-runc/console.go +++ /dev/null @@ -1,165 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -import ( - "fmt" - "io/ioutil" - "net" - "os" - "path/filepath" - - "github.com/containerd/console" - "golang.org/x/sys/unix" -) - -// NewConsoleSocket creates a new unix socket at the provided path to accept a -// pty master created by runc for use by the container -func NewConsoleSocket(path string) (*Socket, error) { - abs, err := filepath.Abs(path) - if err != nil { - return nil, err - } - addr, err := net.ResolveUnixAddr("unix", abs) - if err != nil { - return nil, err - } - l, err := net.ListenUnix("unix", addr) - if err != nil { - return nil, err - } - return &Socket{ - l: l, - }, nil -} - -// NewTempConsoleSocket returns a temp console socket for use with a container -// On Close(), the socket is deleted -func NewTempConsoleSocket() (*Socket, error) { - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - dir, err := ioutil.TempDir(runtimeDir, "pty") - if err != nil { - return nil, err - } - abs, err := filepath.Abs(filepath.Join(dir, "pty.sock")) - if err != nil { - return nil, err - } - addr, err := net.ResolveUnixAddr("unix", abs) - if err != nil { - return nil, err - } - l, err := net.ListenUnix("unix", addr) - if err != nil { - return nil, err - } - if runtimeDir != "" { - if err := os.Chmod(abs, 0755|os.ModeSticky); err != nil { - return nil, err - } - } - return &Socket{ - l: l, - rmdir: true, - }, nil -} - -// Socket is a unix socket that accepts the pty master created by runc -type Socket struct { - rmdir bool - l *net.UnixListener -} - -// Path returns the path to the unix socket on disk -func (c *Socket) Path() string { - return c.l.Addr().String() -} - -// recvFd waits for a file descriptor to be sent over the given AF_UNIX -// socket. The file name of the remote file descriptor will be recreated -// locally (it is sent as non-auxiliary data in the same payload). -func recvFd(socket *net.UnixConn) (*os.File, error) { - const MaxNameLen = 4096 - var oobSpace = unix.CmsgSpace(4) - - name := make([]byte, MaxNameLen) - oob := make([]byte, oobSpace) - - n, oobn, _, _, err := socket.ReadMsgUnix(name, oob) - if err != nil { - return nil, err - } - - if n >= MaxNameLen || oobn != oobSpace { - return nil, fmt.Errorf("recvfd: incorrect number of bytes read (n=%d oobn=%d)", n, oobn) - } - - // Truncate. 
- name = name[:n] - oob = oob[:oobn] - - scms, err := unix.ParseSocketControlMessage(oob) - if err != nil { - return nil, err - } - if len(scms) != 1 { - return nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms)) - } - scm := scms[0] - - fds, err := unix.ParseUnixRights(&scm) - if err != nil { - return nil, err - } - if len(fds) != 1 { - return nil, fmt.Errorf("recvfd: number of fds is not 1: %d", len(fds)) - } - fd := uintptr(fds[0]) - - return os.NewFile(fd, string(name)), nil -} - -// ReceiveMaster blocks until the socket receives the pty master -func (c *Socket) ReceiveMaster() (console.Console, error) { - conn, err := c.l.Accept() - if err != nil { - return nil, err - } - defer conn.Close() - uc, ok := conn.(*net.UnixConn) - if !ok { - return nil, fmt.Errorf("received connection which was not a unix socket") - } - f, err := recvFd(uc) - if err != nil { - return nil, err - } - return console.ConsoleFromFile(f) -} - -// Close closes the unix socket -func (c *Socket) Close() error { - err := c.l.Close() - if c.rmdir { - if rerr := os.RemoveAll(filepath.Dir(c.Path())); err == nil { - err = rerr - } - } - return err -} diff --git a/vendor/github.com/containerd/go-runc/container.go b/vendor/github.com/containerd/go-runc/container.go deleted file mode 100644 index 107381a5..00000000 --- a/vendor/github.com/containerd/go-runc/container.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -import "time" - -// Container hold information for a runc container -type Container struct { - ID string `json:"id"` - Pid int `json:"pid"` - Status string `json:"status"` - Bundle string `json:"bundle"` - Rootfs string `json:"rootfs"` - Created time.Time `json:"created"` - Annotations map[string]string `json:"annotations"` -} diff --git a/vendor/github.com/containerd/go-runc/events.go b/vendor/github.com/containerd/go-runc/events.go deleted file mode 100644 index d610aeb3..00000000 --- a/vendor/github.com/containerd/go-runc/events.go +++ /dev/null @@ -1,100 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package runc - -type Event struct { - // Type are the event type generated by runc - // If the type is "error" then check the Err field on the event for - // the actual error - Type string `json:"type"` - ID string `json:"id"` - Stats *Stats `json:"data,omitempty"` - // Err has a read error if we were unable to decode the event from runc - Err error `json:"-"` -} - -type Stats struct { - Cpu Cpu `json:"cpu"` - Memory Memory `json:"memory"` - Pids Pids `json:"pids"` - Blkio Blkio `json:"blkio"` - Hugetlb map[string]Hugetlb `json:"hugetlb"` -} - -type Hugetlb struct { - Usage uint64 `json:"usage,omitempty"` - Max uint64 `json:"max,omitempty"` - Failcnt uint64 `json:"failcnt"` -} - -type BlkioEntry struct { - Major uint64 `json:"major,omitempty"` - Minor uint64 `json:"minor,omitempty"` - Op string `json:"op,omitempty"` - Value uint64 `json:"value,omitempty"` -} - -type Blkio struct { - IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"` - IoServicedRecursive []BlkioEntry `json:"ioServicedRecursive,omitempty"` - IoQueuedRecursive []BlkioEntry `json:"ioQueueRecursive,omitempty"` - IoServiceTimeRecursive []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"` - IoWaitTimeRecursive []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"` - IoMergedRecursive []BlkioEntry `json:"ioMergedRecursive,omitempty"` - IoTimeRecursive []BlkioEntry `json:"ioTimeRecursive,omitempty"` - SectorsRecursive []BlkioEntry `json:"sectorsRecursive,omitempty"` -} - -type Pids struct { - Current uint64 `json:"current,omitempty"` - Limit uint64 `json:"limit,omitempty"` -} - -type Throttling struct { - Periods uint64 `json:"periods,omitempty"` - ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"` - ThrottledTime uint64 `json:"throttledTime,omitempty"` -} - -type CpuUsage struct { - // Units: nanoseconds. 
- Total uint64 `json:"total,omitempty"` - Percpu []uint64 `json:"percpu,omitempty"` - Kernel uint64 `json:"kernel"` - User uint64 `json:"user"` -} - -type Cpu struct { - Usage CpuUsage `json:"usage,omitempty"` - Throttling Throttling `json:"throttling,omitempty"` -} - -type MemoryEntry struct { - Limit uint64 `json:"limit"` - Usage uint64 `json:"usage,omitempty"` - Max uint64 `json:"max,omitempty"` - Failcnt uint64 `json:"failcnt"` -} - -type Memory struct { - Cache uint64 `json:"cache,omitempty"` - Usage MemoryEntry `json:"usage,omitempty"` - Swap MemoryEntry `json:"swap,omitempty"` - Kernel MemoryEntry `json:"kernel,omitempty"` - KernelTCP MemoryEntry `json:"kernelTCP,omitempty"` - Raw map[string]uint64 `json:"raw,omitempty"` -} diff --git a/vendor/github.com/containerd/go-runc/go.mod b/vendor/github.com/containerd/go-runc/go.mod deleted file mode 100644 index d833ee16..00000000 --- a/vendor/github.com/containerd/go-runc/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/containerd/go-runc - -go 1.13 - -require ( - github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e - github.com/opencontainers/runtime-spec v1.0.1 - github.com/pkg/errors v0.8.1 - golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 -) diff --git a/vendor/github.com/containerd/go-runc/go.sum b/vendor/github.com/containerd/go-runc/go.sum deleted file mode 100644 index f7d00e37..00000000 --- a/vendor/github.com/containerd/go-runc/go.sum +++ /dev/null @@ -1,9 +0,0 @@ -github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e h1:GdiIYd8ZDOrT++e1NjhSD4rGt9zaJukHm4rt5F4mRQc= -github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= -github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ= -github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/containerd/go-runc/io.go b/vendor/github.com/containerd/go-runc/io.go deleted file mode 100644 index 6cf0410c..00000000 --- a/vendor/github.com/containerd/go-runc/io.go +++ /dev/null @@ -1,218 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package runc - -import ( - "io" - "os" - "os/exec" -) - -type IO interface { - io.Closer - Stdin() io.WriteCloser - Stdout() io.ReadCloser - Stderr() io.ReadCloser - Set(*exec.Cmd) -} - -type StartCloser interface { - CloseAfterStart() error -} - -// IOOpt sets I/O creation options -type IOOpt func(*IOOption) - -// IOOption holds I/O creation options -type IOOption struct { - OpenStdin bool - OpenStdout bool - OpenStderr bool -} - -func defaultIOOption() *IOOption { - return &IOOption{ - OpenStdin: true, - OpenStdout: true, - OpenStderr: true, - } -} - -func newPipe() (*pipe, error) { - r, w, err := os.Pipe() - if err != nil { - return nil, err - } - return &pipe{ - r: r, - w: w, - }, nil -} - -type pipe struct { - r *os.File - w *os.File -} - -func (p *pipe) Close() error { - err := p.w.Close() - if rerr := p.r.Close(); err == nil { - err = rerr - } - return err -} - -type pipeIO struct { - in *pipe - out *pipe - err *pipe -} - -func (i *pipeIO) Stdin() io.WriteCloser { - if i.in == nil { - return nil - } - return i.in.w -} - -func (i *pipeIO) Stdout() io.ReadCloser { - if i.out == nil { - return nil - } - return i.out.r -} - -func (i *pipeIO) Stderr() io.ReadCloser { - if i.err == nil { - return nil - } - return i.err.r -} - -func (i *pipeIO) Close() error { - var err error - for _, v := range []*pipe{ - i.in, - i.out, - i.err, - } { - if v != nil { - if cerr := v.Close(); err == nil { - err = cerr - } - } - } - return err -} - -func (i *pipeIO) CloseAfterStart() error { - for _, f := range []*pipe{ - i.out, - i.err, - } { - if f != nil { - f.w.Close() - } - } - return nil -} - -// Set sets the io to the exec.Cmd -func (i *pipeIO) Set(cmd *exec.Cmd) { - if i.in != nil { - cmd.Stdin = i.in.r - } - if i.out != nil { - cmd.Stdout = i.out.w - } - if i.err != nil { - cmd.Stderr = i.err.w - } -} - -func NewSTDIO() (IO, error) { - return &stdio{}, nil -} - -type stdio struct { -} - -func (s *stdio) Close() error { - return nil -} - -func (s *stdio) Set(cmd *exec.Cmd) { - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr -} - -func (s *stdio) Stdin() io.WriteCloser { - return os.Stdin -} - -func (s *stdio) Stdout() io.ReadCloser { - return os.Stdout -} - -func (s *stdio) Stderr() io.ReadCloser { - return os.Stderr -} - -// NewNullIO returns IO setup for /dev/null use with runc -func NewNullIO() (IO, error) { - f, err := os.Open(os.DevNull) - if err != nil { - return nil, err - } - return &nullIO{ - devNull: f, - }, nil -} - -type nullIO struct { - devNull *os.File -} - -func (n *nullIO) Close() error { - // this should be closed after start but if not - // make sure we close the file but don't return the error - n.devNull.Close() - return nil -} - -func (n *nullIO) Stdin() io.WriteCloser { - return nil -} - -func (n *nullIO) Stdout() io.ReadCloser { - return nil -} - -func (n *nullIO) Stderr() io.ReadCloser { - return nil -} - -func (n *nullIO) Set(c *exec.Cmd) { - // don't set STDIN here - c.Stdout = n.devNull - c.Stderr = n.devNull -} - -func (n *nullIO) CloseAfterStart() error { - return n.devNull.Close() -} diff --git a/vendor/github.com/containerd/go-runc/io_unix.go b/vendor/github.com/containerd/go-runc/io_unix.go deleted file mode 100644 index 567cd072..00000000 --- a/vendor/github.com/containerd/go-runc/io_unix.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -import ( - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -// NewPipeIO creates pipe pairs to be used with runc -func NewPipeIO(uid, gid int, opts ...IOOpt) (i IO, err error) { - option := defaultIOOption() - for _, o := range opts { - o(option) - } - var ( - pipes []*pipe - stdin, stdout, stderr *pipe - ) - // cleanup in case of an error - defer func() { - if err != nil { - for _, p := range pipes { - p.Close() - } - } - }() - if option.OpenStdin { - if stdin, err = newPipe(); err != nil { - return nil, err - } - pipes = append(pipes, stdin) - if err = unix.Fchown(int(stdin.r.Fd()), uid, gid); err != nil { - return nil, errors.Wrap(err, "failed to chown stdin") - } - } - if option.OpenStdout { - if stdout, err = newPipe(); err != nil { - return nil, err - } - pipes = append(pipes, stdout) - if err = unix.Fchown(int(stdout.w.Fd()), uid, gid); err != nil { - return nil, errors.Wrap(err, "failed to chown stdout") - } - } - if option.OpenStderr { - if stderr, err = newPipe(); err != nil { - return nil, err - } - pipes = append(pipes, stderr) - if err = unix.Fchown(int(stderr.w.Fd()), uid, gid); err != nil { - return nil, errors.Wrap(err, "failed to chown stderr") - } - } - return &pipeIO{ - in: stdin, - out: stdout, - err: stderr, - }, nil -} diff --git a/vendor/github.com/containerd/go-runc/io_windows.go b/vendor/github.com/containerd/go-runc/io_windows.go deleted file mode 100644 index fc56ac4f..00000000 --- a/vendor/github.com/containerd/go-runc/io_windows.go +++ /dev/null @@ -1,62 +0,0 @@ -// +build windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package runc - -// NewPipeIO creates pipe pairs to be used with runc -func NewPipeIO(opts ...IOOpt) (i IO, err error) { - option := defaultIOOption() - for _, o := range opts { - o(option) - } - var ( - pipes []*pipe - stdin, stdout, stderr *pipe - ) - // cleanup in case of an error - defer func() { - if err != nil { - for _, p := range pipes { - p.Close() - } - } - }() - if option.OpenStdin { - if stdin, err = newPipe(); err != nil { - return nil, err - } - pipes = append(pipes, stdin) - } - if option.OpenStdout { - if stdout, err = newPipe(); err != nil { - return nil, err - } - pipes = append(pipes, stdout) - } - if option.OpenStderr { - if stderr, err = newPipe(); err != nil { - return nil, err - } - pipes = append(pipes, stderr) - } - return &pipeIO{ - in: stdin, - out: stdout, - err: stderr, - }, nil -} diff --git a/vendor/github.com/containerd/go-runc/monitor.go b/vendor/github.com/containerd/go-runc/monitor.go deleted file mode 100644 index ff06a3fc..00000000 --- a/vendor/github.com/containerd/go-runc/monitor.go +++ /dev/null @@ -1,76 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -import ( - "os/exec" - "syscall" - "time" -) - -var Monitor ProcessMonitor = &defaultMonitor{} - -type Exit struct { - Timestamp time.Time - Pid int - Status int -} - -// ProcessMonitor is an interface for process monitoring -// -// It allows daemons using go-runc to have a SIGCHLD handler -// to handle exits without introducing races between the handler -// and go's exec.Cmd -// These methods should match the methods exposed by exec.Cmd to provide -// a consistent experience for the caller -type ProcessMonitor interface { - Start(*exec.Cmd) (chan Exit, error) - Wait(*exec.Cmd, chan Exit) (int, error) -} - -type defaultMonitor struct { -} - -func (m *defaultMonitor) Start(c *exec.Cmd) (chan Exit, error) { - if err := c.Start(); err != nil { - return nil, err - } - ec := make(chan Exit, 1) - go func() { - var status int - if err := c.Wait(); err != nil { - status = 255 - if exitErr, ok := err.(*exec.ExitError); ok { - if ws, ok := exitErr.Sys().(syscall.WaitStatus); ok { - status = ws.ExitStatus() - } - } - } - ec <- Exit{ - Timestamp: time.Now(), - Pid: c.Process.Pid, - Status: status, - } - close(ec) - }() - return ec, nil -} - -func (m *defaultMonitor) Wait(c *exec.Cmd, ec chan Exit) (int, error) { - e := <-ec - return e.Status, nil -} diff --git a/vendor/github.com/containerd/go-runc/runc.go b/vendor/github.com/containerd/go-runc/runc.go deleted file mode 100644 index c3a95af2..00000000 --- a/vendor/github.com/containerd/go-runc/runc.go +++ /dev/null @@ -1,715 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -// Format is the type of log formatting options avaliable -type Format string - -// TopBody represents the structured data of the full ps output -type TopResults struct { - // Processes running in the container, where each is process is an array of values corresponding to the headers - Processes [][]string `json:"Processes"` - - // Headers are the names of the columns - Headers []string `json:"Headers"` -} - -const ( - none Format = "" - JSON Format = "json" - Text Format = "text" - // DefaultCommand is the default command for Runc - DefaultCommand = "runc" -) - -// Runc is the client to the runc cli -type Runc struct { - //If command is empty, DefaultCommand is used - Command string - Root string - Debug bool - Log string - LogFormat Format - PdeathSignal syscall.Signal - Setpgid bool - Criu string - SystemdCgroup bool - Rootless *bool // nil stands for "auto" -} - -// List returns all containers created inside the provided runc root directory -func (r *Runc) List(context context.Context) ([]*Container, error) { - data, err := cmdOutput(r.command(context, "list", "--format=json"), false) - defer putBuf(data) - if err != nil { - return nil, err - } - var out []*Container - if err := json.Unmarshal(data.Bytes(), &out); err != nil { - return nil, err - } - return out, nil -} - -// State returns the state for the container provided by id -func (r *Runc) State(context context.Context, id string) (*Container, error) { - data, err := cmdOutput(r.command(context, "state", id), true) - defer putBuf(data) - if err != nil { - return nil, fmt.Errorf("%s: %s", err, data.String()) - } - var c Container - if err := json.Unmarshal(data.Bytes(), &c); err != nil { - return nil, err - } - return &c, nil -} - -type ConsoleSocket interface { - Path() string -} - -type CreateOpts struct { - IO - // PidFile is a path to where a pid file should be created - PidFile string - ConsoleSocket ConsoleSocket - Detach bool - NoPivot bool - NoNewKeyring bool - ExtraFiles []*os.File -} - -func (o *CreateOpts) args() (out []string, err error) { - if o.PidFile != "" { - abs, err := filepath.Abs(o.PidFile) - if err != nil { - return nil, err - } - out = append(out, "--pid-file", abs) - } - if o.ConsoleSocket != nil { - out = append(out, "--console-socket", o.ConsoleSocket.Path()) - } - if o.NoPivot { - out = append(out, "--no-pivot") - } - if o.NoNewKeyring { - out = append(out, "--no-new-keyring") - } - if o.Detach { - out = append(out, "--detach") - } - if o.ExtraFiles != nil { - out = append(out, "--preserve-fds", strconv.Itoa(len(o.ExtraFiles))) - } - return out, nil -} - -// Create creates a new container and returns its pid if it was created successfully -func (r *Runc) Create(context context.Context, id, bundle string, opts *CreateOpts) error { - args := []string{"create", "--bundle", bundle} - if opts != nil { - oargs, err := opts.args() - if err != nil { - 
return err - } - args = append(args, oargs...) - } - cmd := r.command(context, append(args, id)...) - if opts != nil && opts.IO != nil { - opts.Set(cmd) - } - cmd.ExtraFiles = opts.ExtraFiles - - if cmd.Stdout == nil && cmd.Stderr == nil { - data, err := cmdOutput(cmd, true) - defer putBuf(data) - if err != nil { - return fmt.Errorf("%s: %s", err, data.String()) - } - return nil - } - ec, err := Monitor.Start(cmd) - if err != nil { - return err - } - if opts != nil && opts.IO != nil { - if c, ok := opts.IO.(StartCloser); ok { - if err := c.CloseAfterStart(); err != nil { - return err - } - } - } - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) - } - return err -} - -// Start will start an already created container -func (r *Runc) Start(context context.Context, id string) error { - return r.runOrError(r.command(context, "start", id)) -} - -type ExecOpts struct { - IO - PidFile string - ConsoleSocket ConsoleSocket - Detach bool -} - -func (o *ExecOpts) args() (out []string, err error) { - if o.ConsoleSocket != nil { - out = append(out, "--console-socket", o.ConsoleSocket.Path()) - } - if o.Detach { - out = append(out, "--detach") - } - if o.PidFile != "" { - abs, err := filepath.Abs(o.PidFile) - if err != nil { - return nil, err - } - out = append(out, "--pid-file", abs) - } - return out, nil -} - -// Exec executres and additional process inside the container based on a full -// OCI Process specification -func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts *ExecOpts) error { - f, err := ioutil.TempFile(os.Getenv("XDG_RUNTIME_DIR"), "runc-process") - if err != nil { - return err - } - defer os.Remove(f.Name()) - err = json.NewEncoder(f).Encode(spec) - f.Close() - if err != nil { - return err - } - args := []string{"exec", "--process", f.Name()} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return err - } - args = append(args, oargs...) - } - cmd := r.command(context, append(args, id)...) - if opts != nil && opts.IO != nil { - opts.Set(cmd) - } - if cmd.Stdout == nil && cmd.Stderr == nil { - data, err := cmdOutput(cmd, true) - defer putBuf(data) - if err != nil { - return fmt.Errorf("%s: %s", err, data.String()) - } - return nil - } - ec, err := Monitor.Start(cmd) - if err != nil { - return err - } - if opts != nil && opts.IO != nil { - if c, ok := opts.IO.(StartCloser); ok { - if err := c.CloseAfterStart(); err != nil { - return err - } - } - } - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) - } - return err -} - -// Run runs the create, start, delete lifecycle of the container -// and returns its exit status after it has exited -func (r *Runc) Run(context context.Context, id, bundle string, opts *CreateOpts) (int, error) { - args := []string{"run", "--bundle", bundle} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return -1, err - } - args = append(args, oargs...) - } - cmd := r.command(context, append(args, id)...) 
- if opts != nil && opts.IO != nil { - opts.Set(cmd) - } - ec, err := Monitor.Start(cmd) - if err != nil { - return -1, err - } - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) - } - return status, err -} - -type DeleteOpts struct { - Force bool -} - -func (o *DeleteOpts) args() (out []string) { - if o.Force { - out = append(out, "--force") - } - return out -} - -// Delete deletes the container -func (r *Runc) Delete(context context.Context, id string, opts *DeleteOpts) error { - args := []string{"delete"} - if opts != nil { - args = append(args, opts.args()...) - } - return r.runOrError(r.command(context, append(args, id)...)) -} - -// KillOpts specifies options for killing a container and its processes -type KillOpts struct { - All bool -} - -func (o *KillOpts) args() (out []string) { - if o.All { - out = append(out, "--all") - } - return out -} - -// Kill sends the specified signal to the container -func (r *Runc) Kill(context context.Context, id string, sig int, opts *KillOpts) error { - args := []string{ - "kill", - } - if opts != nil { - args = append(args, opts.args()...) - } - return r.runOrError(r.command(context, append(args, id, strconv.Itoa(sig))...)) -} - -// Stats return the stats for a container like cpu, memory, and io -func (r *Runc) Stats(context context.Context, id string) (*Stats, error) { - cmd := r.command(context, "events", "--stats", id) - rd, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - ec, err := Monitor.Start(cmd) - if err != nil { - return nil, err - } - defer func() { - rd.Close() - Monitor.Wait(cmd, ec) - }() - var e Event - if err := json.NewDecoder(rd).Decode(&e); err != nil { - return nil, err - } - return e.Stats, nil -} - -// Events returns an event stream from runc for a container with stats and OOM notifications -func (r *Runc) Events(context context.Context, id string, interval time.Duration) (chan *Event, error) { - cmd := r.command(context, "events", fmt.Sprintf("--interval=%ds", int(interval.Seconds())), id) - rd, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - ec, err := Monitor.Start(cmd) - if err != nil { - rd.Close() - return nil, err - } - var ( - dec = json.NewDecoder(rd) - c = make(chan *Event, 128) - ) - go func() { - defer func() { - close(c) - rd.Close() - Monitor.Wait(cmd, ec) - }() - for { - var e Event - if err := dec.Decode(&e); err != nil { - if err == io.EOF { - return - } - e = Event{ - Type: "error", - Err: err, - } - } - c <- &e - } - }() - return c, nil -} - -// Pause the container with the provided id -func (r *Runc) Pause(context context.Context, id string) error { - return r.runOrError(r.command(context, "pause", id)) -} - -// Resume the container with the provided id -func (r *Runc) Resume(context context.Context, id string) error { - return r.runOrError(r.command(context, "resume", id)) -} - -// Ps lists all the processes inside the container returning their pids -func (r *Runc) Ps(context context.Context, id string) ([]int, error) { - data, err := cmdOutput(r.command(context, "ps", "--format", "json", id), true) - defer putBuf(data) - if err != nil { - return nil, fmt.Errorf("%s: %s", err, data.String()) - } - var pids []int - if err := json.Unmarshal(data.Bytes(), &pids); err != nil { - return nil, err - } - return pids, nil -} - -// Top lists all the processes inside the container returning the full ps data -func (r *Runc) Top(context context.Context, id string, psOptions string) (*TopResults, 
error) { - data, err := cmdOutput(r.command(context, "ps", "--format", "table", id, psOptions), true) - defer putBuf(data) - if err != nil { - return nil, fmt.Errorf("%s: %s", err, data.String()) - } - - topResults, err := ParsePSOutput(data.Bytes()) - if err != nil { - return nil, fmt.Errorf("%s: ", err) - } - return topResults, nil -} - -type CheckpointOpts struct { - // ImagePath is the path for saving the criu image file - ImagePath string - // WorkDir is the working directory for criu - WorkDir string - // ParentPath is the path for previous image files from a pre-dump - ParentPath string - // AllowOpenTCP allows open tcp connections to be checkpointed - AllowOpenTCP bool - // AllowExternalUnixSockets allows external unix sockets to be checkpointed - AllowExternalUnixSockets bool - // AllowTerminal allows the terminal(pty) to be checkpointed with a container - AllowTerminal bool - // CriuPageServer is the address:port for the criu page server - CriuPageServer string - // FileLocks handle file locks held by the container - FileLocks bool - // Cgroups is the cgroup mode for how to handle the checkpoint of a container's cgroups - Cgroups CgroupMode - // EmptyNamespaces creates a namespace for the container but does not save its properties - // Provide the namespaces you wish to be checkpointed without their settings on restore - EmptyNamespaces []string -} - -type CgroupMode string - -const ( - Soft CgroupMode = "soft" - Full CgroupMode = "full" - Strict CgroupMode = "strict" -) - -func (o *CheckpointOpts) args() (out []string) { - if o.ImagePath != "" { - out = append(out, "--image-path", o.ImagePath) - } - if o.WorkDir != "" { - out = append(out, "--work-path", o.WorkDir) - } - if o.ParentPath != "" { - out = append(out, "--parent-path", o.ParentPath) - } - if o.AllowOpenTCP { - out = append(out, "--tcp-established") - } - if o.AllowExternalUnixSockets { - out = append(out, "--ext-unix-sk") - } - if o.AllowTerminal { - out = append(out, "--shell-job") - } - if o.CriuPageServer != "" { - out = append(out, "--page-server", o.CriuPageServer) - } - if o.FileLocks { - out = append(out, "--file-locks") - } - if string(o.Cgroups) != "" { - out = append(out, "--manage-cgroups-mode", string(o.Cgroups)) - } - for _, ns := range o.EmptyNamespaces { - out = append(out, "--empty-ns", ns) - } - return out -} - -type CheckpointAction func([]string) []string - -// LeaveRunning keeps the container running after the checkpoint has been completed -func LeaveRunning(args []string) []string { - return append(args, "--leave-running") -} - -// PreDump allows a pre-dump of the checkpoint to be made and completed later -func PreDump(args []string) []string { - return append(args, "--pre-dump") -} - -// Checkpoint allows you to checkpoint a container using criu -func (r *Runc) Checkpoint(context context.Context, id string, opts *CheckpointOpts, actions ...CheckpointAction) error { - args := []string{"checkpoint"} - if opts != nil { - args = append(args, opts.args()...) 
- } - for _, a := range actions { - args = a(args) - } - return r.runOrError(r.command(context, append(args, id)...)) -} - -type RestoreOpts struct { - CheckpointOpts - IO - - Detach bool - PidFile string - NoSubreaper bool - NoPivot bool - ConsoleSocket ConsoleSocket -} - -func (o *RestoreOpts) args() ([]string, error) { - out := o.CheckpointOpts.args() - if o.Detach { - out = append(out, "--detach") - } - if o.PidFile != "" { - abs, err := filepath.Abs(o.PidFile) - if err != nil { - return nil, err - } - out = append(out, "--pid-file", abs) - } - if o.ConsoleSocket != nil { - out = append(out, "--console-socket", o.ConsoleSocket.Path()) - } - if o.NoPivot { - out = append(out, "--no-pivot") - } - if o.NoSubreaper { - out = append(out, "-no-subreaper") - } - return out, nil -} - -// Restore restores a container with the provide id from an existing checkpoint -func (r *Runc) Restore(context context.Context, id, bundle string, opts *RestoreOpts) (int, error) { - args := []string{"restore"} - if opts != nil { - oargs, err := opts.args() - if err != nil { - return -1, err - } - args = append(args, oargs...) - } - args = append(args, "--bundle", bundle) - cmd := r.command(context, append(args, id)...) - if opts != nil && opts.IO != nil { - opts.Set(cmd) - } - ec, err := Monitor.Start(cmd) - if err != nil { - return -1, err - } - if opts != nil && opts.IO != nil { - if c, ok := opts.IO.(StartCloser); ok { - if err := c.CloseAfterStart(); err != nil { - return -1, err - } - } - } - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) - } - return status, err -} - -// Update updates the current container with the provided resource spec -func (r *Runc) Update(context context.Context, id string, resources *specs.LinuxResources) error { - buf := getBuf() - defer putBuf(buf) - - if err := json.NewEncoder(buf).Encode(resources); err != nil { - return err - } - args := []string{"update", "--resources", "-", id} - cmd := r.command(context, args...) 
- cmd.Stdin = buf - return r.runOrError(cmd) -} - -var ErrParseRuncVersion = errors.New("unable to parse runc version") - -type Version struct { - Runc string - Commit string - Spec string -} - -// Version returns the runc and runtime-spec versions -func (r *Runc) Version(context context.Context) (Version, error) { - data, err := cmdOutput(r.command(context, "--version"), false) - defer putBuf(data) - if err != nil { - return Version{}, err - } - return parseVersion(data.Bytes()) -} - -func parseVersion(data []byte) (Version, error) { - var v Version - parts := strings.Split(strings.TrimSpace(string(data)), "\n") - - if len(parts) > 0 { - if !strings.HasPrefix(parts[0], "runc version ") { - return v, nil - } - v.Runc = parts[0][13:] - - for _, part := range parts[1:] { - if strings.HasPrefix(part, "commit: ") { - v.Commit = part[8:] - } else if strings.HasPrefix(part, "spec: ") { - v.Spec = part[6:] - } - } - } - - return v, nil -} - -func (r *Runc) args() (out []string) { - if r.Root != "" { - out = append(out, "--root", r.Root) - } - if r.Debug { - out = append(out, "--debug") - } - if r.Log != "" { - out = append(out, "--log", r.Log) - } - if r.LogFormat != none { - out = append(out, "--log-format", string(r.LogFormat)) - } - if r.Criu != "" { - out = append(out, "--criu", r.Criu) - } - if r.SystemdCgroup { - out = append(out, "--systemd-cgroup") - } - if r.Rootless != nil { - // nil stands for "auto" (differs from explicit "false") - out = append(out, "--rootless="+strconv.FormatBool(*r.Rootless)) - } - return out -} - -// runOrError will run the provided command. If an error is -// encountered and neither Stdout or Stderr was set the error and the -// stderr of the command will be returned in the format of : -// -func (r *Runc) runOrError(cmd *exec.Cmd) error { - if cmd.Stdout != nil || cmd.Stderr != nil { - ec, err := Monitor.Start(cmd) - if err != nil { - return err - } - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) - } - return err - } - data, err := cmdOutput(cmd, true) - defer putBuf(data) - if err != nil { - return fmt.Errorf("%s: %s", err, data.String()) - } - return nil -} - -// callers of cmdOutput are expected to call putBuf on the returned Buffer -// to ensure it is released back to the shared pool after use. -func cmdOutput(cmd *exec.Cmd, combined bool) (*bytes.Buffer, error) { - b := getBuf() - - cmd.Stdout = b - if combined { - cmd.Stderr = b - } - ec, err := Monitor.Start(cmd) - if err != nil { - return nil, err - } - - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) - } - - return b, err -} diff --git a/vendor/github.com/containerd/go-runc/utils.go b/vendor/github.com/containerd/go-runc/utils.go deleted file mode 100644 index 948b6336..00000000 --- a/vendor/github.com/containerd/go-runc/utils.go +++ /dev/null @@ -1,111 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package runc - -import ( - "bytes" - "io/ioutil" - "strconv" - "strings" - "sync" - "syscall" -) - -// ReadPidFile reads the pid file at the provided path and returns -// the pid or an error if the read and conversion is unsuccessful -func ReadPidFile(path string) (int, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return -1, err - } - return strconv.Atoi(string(data)) -} - -const exitSignalOffset = 128 - -// exitStatus returns the correct exit status for a process based on if it -// was signaled or exited cleanly -func exitStatus(status syscall.WaitStatus) int { - if status.Signaled() { - return exitSignalOffset + int(status.Signal()) - } - return status.ExitStatus() -} - -var bytesBufferPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(nil) - }, -} - -func getBuf() *bytes.Buffer { - return bytesBufferPool.Get().(*bytes.Buffer) -} - -func putBuf(b *bytes.Buffer) { - if b == nil { - return - } - - b.Reset() - bytesBufferPool.Put(b) -} - -// fieldsASCII is similar to strings.Fields but only allows ASCII whitespaces -func fieldsASCII(s string) []string { - fn := func(r rune) bool { - switch r { - case '\t', '\n', '\f', '\r', ' ': - return true - } - return false - } - return strings.FieldsFunc(s, fn) -} - -// ParsePSOutput parses the runtime's ps raw output and returns a TopResults -func ParsePSOutput(output []byte) (*TopResults, error) { - topResults := &TopResults{} - - lines := strings.Split(string(output), "\n") - topResults.Headers = fieldsASCII(lines[0]) - - pidIndex := -1 - for i, name := range topResults.Headers { - if name == "PID" { - pidIndex = i - } - } - - for _, line := range lines[1:] { - if len(line) == 0 { - continue - } - - fields := fieldsASCII(line) - - if fields[pidIndex] == "-" { - continue - } - - process := fields[:len(topResults.Headers)-1] - process = append(process, strings.Join(fields[len(topResults.Headers)-1:], " ")) - topResults.Processes = append(topResults.Processes, process) - - } - return topResults, nil -} diff --git a/vendor/github.com/containernetworking/cni/LICENSE b/vendor/github.com/containernetworking/cni/LICENSE deleted file mode 100644 index 8f71f43f..00000000 --- a/vendor/github.com/containernetworking/cni/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go deleted file mode 100644 index 0f14d342..00000000 --- a/vendor/github.com/containernetworking/cni/libcni/api.go +++ /dev/null @@ -1,497 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package libcni - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/containernetworking/cni/pkg/invoke" - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/version" -) - -var ( - CacheDir = "/var/lib/cni" -) - -// A RuntimeConf holds the arguments to one invocation of a CNI plugin -// excepting the network configuration, with the nested exception that -// the `runtimeConfig` from the network configuration is included -// here. -type RuntimeConf struct { - ContainerID string - NetNS string - IfName string - Args [][2]string - // A dictionary of capability-specific data passed by the runtime - // to plugins as top-level keys in the 'runtimeConfig' dictionary - // of the plugin's stdin data. libcni will ensure that only keys - // in this map which match the capabilities of the plugin are passed - // to the plugin - CapabilityArgs map[string]interface{} - - // A cache directory in which to library data. Defaults to CacheDir - CacheDir string -} - -type NetworkConfig struct { - Network *types.NetConf - Bytes []byte -} - -type NetworkConfigList struct { - Name string - CNIVersion string - DisableCheck bool - Plugins []*NetworkConfig - Bytes []byte -} - -type CNI interface { - AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) - CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error - DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error - GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) - - AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) - CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error - DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error - GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) - - ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error) - ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) -} - -type CNIConfig struct { - Path []string - exec invoke.Exec -} - -// CNIConfig implements the CNI interface -var _ CNI = &CNIConfig{} - -// NewCNIConfig returns a new CNIConfig object that will search for plugins -// in the given paths and use the given exec interface to run those plugins, -// or if the exec interface is not given, will use a default exec handler. 
-func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig { - return &CNIConfig{ - Path: path, - exec: exec, - } -} - -func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) { - var err error - - inject := map[string]interface{}{ - "name": name, - "cniVersion": cniVersion, - } - // Add previous plugin result - if prevResult != nil { - inject["prevResult"] = prevResult - } - - // Ensure every config uses the same name and version - orig, err = InjectConf(orig, inject) - if err != nil { - return nil, err - } - - return injectRuntimeConfig(orig, rt) -} - -// This function takes a libcni RuntimeConf structure and injects values into -// a "runtimeConfig" dictionary in the CNI network configuration JSON that -// will be passed to the plugin on stdin. -// -// Only "capabilities arguments" passed by the runtime are currently injected. -// These capabilities arguments are filtered through the plugin's advertised -// capabilities from its config JSON, and any keys in the CapabilityArgs -// matching plugin capabilities are added to the "runtimeConfig" dictionary -// sent to the plugin via JSON on stdin. For example, if the plugin's -// capabilities include "portMappings", and the CapabilityArgs map includes a -// "portMappings" key, that key and its value are added to the "runtimeConfig" -// dictionary to be passed to the plugin's stdin. -func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) { - var err error - - rc := make(map[string]interface{}) - for capability, supported := range orig.Network.Capabilities { - if !supported { - continue - } - if data, ok := rt.CapabilityArgs[capability]; ok { - rc[capability] = data - } - } - - if len(rc) > 0 { - orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc}) - if err != nil { - return nil, err - } - } - - return orig, nil -} - -// ensure we have a usable exec if the CNIConfig was not given one -func (c *CNIConfig) ensureExec() invoke.Exec { - if c.exec == nil { - c.exec = &invoke.DefaultExec{ - RawExec: &invoke.RawExec{Stderr: os.Stderr}, - PluginDecoder: version.PluginDecoder{}, - } - } - return c.exec -} - -func getResultCacheFilePath(netName string, rt *RuntimeConf) string { - cacheDir := rt.CacheDir - if cacheDir == "" { - cacheDir = CacheDir - } - return filepath.Join(cacheDir, "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName)) -} - -func setCachedResult(result types.Result, netName string, rt *RuntimeConf) error { - data, err := json.Marshal(result) - if err != nil { - return err - } - fname := getResultCacheFilePath(netName, rt) - if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { - return err - } - return ioutil.WriteFile(fname, data, 0600) -} - -func delCachedResult(netName string, rt *RuntimeConf) error { - fname := getResultCacheFilePath(netName, rt) - return os.Remove(fname) -} - -func getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { - fname := getResultCacheFilePath(netName, rt) - data, err := ioutil.ReadFile(fname) - if err != nil { - // Ignore read errors; the cached result may not exist on-disk - return nil, nil - } - - // Read the version of the cached result - decoder := version.ConfigDecoder{} - resultCniVersion, err := decoder.Decode(data) - if err != nil { - return nil, err - } - - // Ensure we can understand the result - result, err := version.NewResult(resultCniVersion, data) - if err != nil { - return nil, err - } - - // Convert to 
the config version to ensure plugins get prevResult - // in the same version as the config. The cached result version - // should match the config version unless the config was changed - // while the container was running. - result, err = result.GetAsVersion(cniVersion) - if err != nil && resultCniVersion != cniVersion { - return nil, fmt.Errorf("failed to convert cached result version %q to config version %q: %v", resultCniVersion, cniVersion, err) - } - return result, err -} - -// GetNetworkListCachedResult returns the cached Result of the previous -// previous AddNetworkList() operation for a network list, or an error. -func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { - return getCachedResult(list.Name, list.CNIVersion, rt) -} - -// GetNetworkCachedResult returns the cached Result of the previous -// previous AddNetwork() operation for a network, or an error. -func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { - return getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) -} - -func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { - c.ensureExec() - pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) - if err != nil { - return nil, err - } - - newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) - if err != nil { - return nil, err - } - - return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec) -} - -// AddNetworkList executes a sequence of plugins with the ADD command -func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { - var err error - var result types.Result - for _, net := range list.Plugins { - result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt) - if err != nil { - return nil, err - } - } - - if err = setCachedResult(result, list.Name, rt); err != nil { - return nil, fmt.Errorf("failed to set network %q cached result: %v", list.Name, err) - } - - return result, nil -} - -func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { - c.ensureExec() - pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) - if err != nil { - return err - } - - newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) - if err != nil { - return err - } - - return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec) -} - -// CheckNetworkList executes a sequence of plugins with the CHECK command -func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { - // CHECK was added in CNI spec version 0.4.0 and higher - if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { - return err - } else if !gtet { - return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion) - } - - if list.DisableCheck { - return nil - } - - cachedResult, err := getCachedResult(list.Name, list.CNIVersion, rt) - if err != nil { - return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err) - } - - for _, net := range list.Plugins { - if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { - return err - } - } - - return nil -} - 
-func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { - c.ensureExec() - pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) - if err != nil { - return err - } - - newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) - if err != nil { - return err - } - - return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec) -} - -// DelNetworkList executes a sequence of plugins with the DEL command -func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { - var cachedResult types.Result - - // Cached result on DEL was added in CNI spec version 0.4.0 and higher - if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { - return err - } else if gtet { - cachedResult, err = getCachedResult(list.Name, list.CNIVersion, rt) - if err != nil { - return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err) - } - } - - for i := len(list.Plugins) - 1; i >= 0; i-- { - net := list.Plugins[i] - if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { - return err - } - } - _ = delCachedResult(list.Name, rt) - - return nil -} - -// AddNetwork executes the plugin with the ADD command -func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { - result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt) - if err != nil { - return nil, err - } - - if err = setCachedResult(result, net.Network.Name, rt); err != nil { - return nil, fmt.Errorf("failed to set network %q cached result: %v", net.Network.Name, err) - } - - return result, nil -} - -// CheckNetwork executes the plugin with the CHECK command -func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { - // CHECK was added in CNI spec version 0.4.0 and higher - if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { - return err - } else if !gtet { - return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) - } - - cachedResult, err := getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) - if err != nil { - return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err) - } - return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) -} - -// DelNetwork executes the plugin with the DEL command -func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { - var cachedResult types.Result - - // Cached result on DEL was added in CNI spec version 0.4.0 and higher - if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { - return err - } else if gtet { - cachedResult, err = getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) - if err != nil { - return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err) - } - } - - if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil { - return err - } - _ = delCachedResult(net.Network.Name, rt) - return nil -} - -// ValidateNetworkList checks that a configuration is reasonably valid. -// - all the specified plugins exist on disk -// - every plugin supports the desired version. 
-// -// Returns a list of all capabilities supported by the configuration, or error -func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) { - version := list.CNIVersion - - // holding map for seen caps (in case of duplicates) - caps := map[string]interface{}{} - - errs := []error{} - for _, net := range list.Plugins { - if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil { - errs = append(errs, err) - } - for c, enabled := range net.Network.Capabilities { - if !enabled { - continue - } - caps[c] = struct{}{} - } - } - - if len(errs) > 0 { - return nil, fmt.Errorf("%v", errs) - } - - // make caps list - cc := make([]string, 0, len(caps)) - for c := range caps { - cc = append(cc, c) - } - - return cc, nil -} - -// ValidateNetwork checks that a configuration is reasonably valid. -// It uses the same logic as ValidateNetworkList) -// Returns a list of capabilities -func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) { - caps := []string{} - for c, ok := range net.Network.Capabilities { - if ok { - caps = append(caps, c) - } - } - if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil { - return nil, err - } - return caps, nil -} - -// validatePlugin checks that an individual plugin's configuration is sane -func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error { - pluginPath, err := invoke.FindInPath(pluginName, c.Path) - if err != nil { - return err - } - - vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec) - if err != nil { - return err - } - for _, vers := range vi.SupportedVersions() { - if vers == expectedVersion { - return nil - } - } - return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion) -} - -// GetVersionInfo reports which versions of the CNI spec are supported by -// the given plugin. -func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) { - c.ensureExec() - pluginPath, err := c.exec.FindInPath(pluginType, c.Path) - if err != nil { - return nil, err - } - - return invoke.GetVersionInfo(ctx, pluginPath, c.exec) -} - -// ===== -func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args { - return &invoke.Args{ - Command: action, - ContainerID: rt.ContainerID, - NetNS: rt.NetNS, - PluginArgs: rt.Args, - IfName: rt.IfName, - Path: strings.Join(c.Path, string(os.PathListSeparator)), - } -} diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go deleted file mode 100644 index ea56c509..00000000 --- a/vendor/github.com/containernetworking/cni/libcni/conf.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package libcni - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sort" -) - -type NotFoundError struct { - Dir string - Name string -} - -func (e NotFoundError) Error() string { - return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir) -} - -type NoConfigsFoundError struct { - Dir string -} - -func (e NoConfigsFoundError) Error() string { - return fmt.Sprintf(`no net configurations found in %s`, e.Dir) -} - -func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { - conf := &NetworkConfig{Bytes: bytes} - if err := json.Unmarshal(bytes, &conf.Network); err != nil { - return nil, fmt.Errorf("error parsing configuration: %s", err) - } - if conf.Network.Type == "" { - return nil, fmt.Errorf("error parsing configuration: missing 'type'") - } - return conf, nil -} - -func ConfFromFile(filename string) (*NetworkConfig, error) { - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, fmt.Errorf("error reading %s: %s", filename, err) - } - return ConfFromBytes(bytes) -} - -func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { - rawList := make(map[string]interface{}) - if err := json.Unmarshal(bytes, &rawList); err != nil { - return nil, fmt.Errorf("error parsing configuration list: %s", err) - } - - rawName, ok := rawList["name"] - if !ok { - return nil, fmt.Errorf("error parsing configuration list: no name") - } - name, ok := rawName.(string) - if !ok { - return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName) - } - - var cniVersion string - rawVersion, ok := rawList["cniVersion"] - if ok { - cniVersion, ok = rawVersion.(string) - if !ok { - return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion) - } - } - - disableCheck := false - if rawDisableCheck, ok := rawList["disableCheck"]; ok { - disableCheck, ok = rawDisableCheck.(bool) - if !ok { - return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck) - } - } - - list := &NetworkConfigList{ - Name: name, - DisableCheck: disableCheck, - CNIVersion: cniVersion, - Bytes: bytes, - } - - var plugins []interface{} - plug, ok := rawList["plugins"] - if !ok { - return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key") - } - plugins, ok = plug.([]interface{}) - if !ok { - return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug) - } - if len(plugins) == 0 { - return nil, fmt.Errorf("error parsing configuration list: no plugins in list") - } - - for i, conf := range plugins { - newBytes, err := json.Marshal(conf) - if err != nil { - return nil, fmt.Errorf("Failed to marshal plugin config %d: %v", i, err) - } - netConf, err := ConfFromBytes(newBytes) - if err != nil { - return nil, fmt.Errorf("Failed to parse plugin config %d: %v", i, err) - } - list.Plugins = append(list.Plugins, netConf) - } - - return list, nil -} - -func ConfListFromFile(filename string) (*NetworkConfigList, error) { - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, fmt.Errorf("error reading %s: %s", filename, err) - } - return ConfListFromBytes(bytes) -} - -func ConfFiles(dir string, extensions []string) ([]string, error) { - // In part, adapted from rkt/networking/podenv.go#listFiles - files, err := ioutil.ReadDir(dir) - switch { - case err == nil: // break - case os.IsNotExist(err): - return nil, nil - default: - return nil, err - } - - confFiles := []string{} - for _, f := range files { - if 
f.IsDir() { - continue - } - fileExt := filepath.Ext(f.Name()) - for _, ext := range extensions { - if fileExt == ext { - confFiles = append(confFiles, filepath.Join(dir, f.Name())) - } - } - } - return confFiles, nil -} - -func LoadConf(dir, name string) (*NetworkConfig, error) { - files, err := ConfFiles(dir, []string{".conf", ".json"}) - switch { - case err != nil: - return nil, err - case len(files) == 0: - return nil, NoConfigsFoundError{Dir: dir} - } - sort.Strings(files) - - for _, confFile := range files { - conf, err := ConfFromFile(confFile) - if err != nil { - return nil, err - } - if conf.Network.Name == name { - return conf, nil - } - } - return nil, NotFoundError{dir, name} -} - -func LoadConfList(dir, name string) (*NetworkConfigList, error) { - files, err := ConfFiles(dir, []string{".conflist"}) - if err != nil { - return nil, err - } - sort.Strings(files) - - for _, confFile := range files { - conf, err := ConfListFromFile(confFile) - if err != nil { - return nil, err - } - if conf.Name == name { - return conf, nil - } - } - - // Try and load a network configuration file (instead of list) - // from the same name, then upconvert. - singleConf, err := LoadConf(dir, name) - if err != nil { - // A little extra logic so the error makes sense - if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok { - // Config lists found but no config files found - return nil, NotFoundError{dir, name} - } - - return nil, err - } - return ConfListFromConf(singleConf) -} - -func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) { - config := make(map[string]interface{}) - err := json.Unmarshal(original.Bytes, &config) - if err != nil { - return nil, fmt.Errorf("unmarshal existing network bytes: %s", err) - } - - for key, value := range newValues { - if key == "" { - return nil, fmt.Errorf("keys cannot be empty") - } - - if value == nil { - return nil, fmt.Errorf("key '%s' value must not be nil", key) - } - - config[key] = value - } - - newBytes, err := json.Marshal(config) - if err != nil { - return nil, err - } - - return ConfFromBytes(newBytes) -} - -// ConfListFromConf "upconverts" a network config in to a NetworkConfigList, -// with the single network as the only entry in the list. -func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) { - // Re-deserialize the config's json, then make a raw map configlist. - // This may seem a bit strange, but it's to make the Bytes fields - // actually make sense. Otherwise, the generated json is littered with - // golang default values. - - rawConfig := make(map[string]interface{}) - if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil { - return nil, err - } - - rawConfigList := map[string]interface{}{ - "name": original.Network.Name, - "cniVersion": original.Network.CNIVersion, - "plugins": []interface{}{rawConfig}, - } - - b, err := json.Marshal(rawConfigList) - if err != nil { - return nil, err - } - return ConfListFromBytes(b) -} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go deleted file mode 100644 index 913528c1..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package invoke - -import ( - "fmt" - "os" - "strings" -) - -type CNIArgs interface { - // For use with os/exec; i.e., return nil to inherit the - // environment from this process - // For use in delegation; inherit the environment from this - // process and allow overrides - AsEnv() []string -} - -type inherited struct{} - -var inheritArgsFromEnv inherited - -func (_ *inherited) AsEnv() []string { - return nil -} - -func ArgsFromEnv() CNIArgs { - return &inheritArgsFromEnv -} - -type Args struct { - Command string - ContainerID string - NetNS string - PluginArgs [][2]string - PluginArgsStr string - IfName string - Path string -} - -// Args implements the CNIArgs interface -var _ CNIArgs = &Args{} - -func (args *Args) AsEnv() []string { - env := os.Environ() - pluginArgsStr := args.PluginArgsStr - if pluginArgsStr == "" { - pluginArgsStr = stringify(args.PluginArgs) - } - - // Duplicated values which come first will be overrided, so we must put the - // custom values in the end to avoid being overrided by the process environments. - env = append(env, - "CNI_COMMAND="+args.Command, - "CNI_CONTAINERID="+args.ContainerID, - "CNI_NETNS="+args.NetNS, - "CNI_ARGS="+pluginArgsStr, - "CNI_IFNAME="+args.IfName, - "CNI_PATH="+args.Path, - ) - return dedupEnv(env) -} - -// taken from rkt/networking/net_plugin.go -func stringify(pluginArgs [][2]string) string { - entries := make([]string, len(pluginArgs)) - - for i, kv := range pluginArgs { - entries[i] = strings.Join(kv[:], "=") - } - - return strings.Join(entries, ";") -} - -// DelegateArgs implements the CNIArgs interface -// used for delegation to inherit from environments -// and allow some overrides like CNI_COMMAND -var _ CNIArgs = &DelegateArgs{} - -type DelegateArgs struct { - Command string -} - -func (d *DelegateArgs) AsEnv() []string { - env := os.Environ() - - // The custom values should come in the end to override the existing - // process environment of the same key. - env = append(env, - "CNI_COMMAND="+d.Command, - ) - return dedupEnv(env) -} - -// dedupEnv returns a copy of env with any duplicates removed, in favor of later values. -// Items not of the normal environment "key=value" form are preserved unchanged. -func dedupEnv(env []string) []string { - out := make([]string, 0, len(env)) - envMap := map[string]string{} - - for _, kv := range env { - // find the first "=" in environment, if not, just keep it - eq := strings.Index(kv, "=") - if eq < 0 { - out = append(out, kv) - continue - } - envMap[kv[:eq]] = kv[eq+1:] - } - - for k, v := range envMap { - out = append(out, fmt.Sprintf("%s=%s", k, v)) - } - - return out -} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go deleted file mode 100644 index 8defe4dd..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package invoke - -import ( - "context" - "os" - "path/filepath" - - "github.com/containernetworking/cni/pkg/types" -) - -func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) { - if exec == nil { - exec = defaultExec - } - - paths := filepath.SplitList(os.Getenv("CNI_PATH")) - pluginPath, err := exec.FindInPath(delegatePlugin, paths) - if err != nil { - return "", nil, err - } - - return pluginPath, exec, nil -} - -// DelegateAdd calls the given delegate plugin with the CNI ADD action and -// JSON configuration -func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { - pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) - if err != nil { - return nil, err - } - - // DelegateAdd will override the original "CNI_COMMAND" env from process with ADD - return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec) -} - -// DelegateCheck calls the given delegate plugin with the CNI CHECK action and -// JSON configuration -func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { - pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) - if err != nil { - return err - } - - // DelegateCheck will override the original CNI_COMMAND env from process with CHECK - return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec) -} - -// DelegateDel calls the given delegate plugin with the CNI DEL action and -// JSON configuration -func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { - pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) - if err != nil { - return err - } - - // DelegateDel will override the original CNI_COMMAND env from process with DEL - return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec) -} - -// return CNIArgs used by delegation -func delegateArgs(action string) *DelegateArgs { - return &DelegateArgs{ - Command: action, - } -} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go deleted file mode 100644 index 8e6d30b8..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package invoke - -import ( - "context" - "fmt" - "os" - - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/version" -) - -// Exec is an interface encapsulates all operations that deal with finding -// and executing a CNI plugin. Tests may provide a fake implementation -// to avoid writing fake plugins to temporary directories during the test. -type Exec interface { - ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) - FindInPath(plugin string, paths []string) (string, error) - Decode(jsonBytes []byte) (version.PluginInfo, error) -} - -// For example, a testcase could pass an instance of the following fakeExec -// object to ExecPluginWithResult() to verify the incoming stdin and environment -// and provide a tailored response: -// -//import ( -// "encoding/json" -// "path" -// "strings" -//) -// -//type fakeExec struct { -// version.PluginDecoder -//} -// -//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { -// net := &types.NetConf{} -// err := json.Unmarshal(stdinData, net) -// if err != nil { -// return nil, fmt.Errorf("failed to unmarshal configuration: %v", err) -// } -// pluginName := path.Base(pluginPath) -// if pluginName != net.Type { -// return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type) -// } -// for _, e := range environ { -// // Check environment for forced failure request -// parts := strings.Split(e, "=") -// if len(parts) > 0 && parts[0] == "FAIL" { -// return nil, fmt.Errorf("failed to execute plugin %s", pluginName) -// } -// } -// return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil -//} -// -//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { -// if len(paths) > 0 { -// return path.Join(paths[0], plugin), nil -// } -// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) -//} - -func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { - if exec == nil { - exec = defaultExec - } - - stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) - if err != nil { - return nil, err - } - - // Plugin must return result in same version as specified in netconf - versionDecoder := &version.ConfigDecoder{} - confVersion, err := versionDecoder.Decode(netconf) - if err != nil { - return nil, err - } - - return version.NewResult(confVersion, stdoutBytes) -} - -func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { - if exec == nil { - exec = defaultExec - } - _, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) - return err -} - -// GetVersionInfo returns the version information available about the plugin. -// For recent-enough plugins, it uses the information returned by the VERSION -// command. 
For older plugins which do not recognize that command, it reports -// version 0.1.0 -func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) { - if exec == nil { - exec = defaultExec - } - args := &Args{ - Command: "VERSION", - - // set fake values required by plugins built against an older version of skel - NetNS: "dummy", - IfName: "dummy", - Path: "dummy", - } - stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current())) - stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv()) - if err != nil { - if err.Error() == "unknown CNI_COMMAND: VERSION" { - return version.PluginSupports("0.1.0"), nil - } - return nil, err - } - - return exec.Decode(stdoutBytes) -} - -// DefaultExec is an object that implements the Exec interface which looks -// for and executes plugins from disk. -type DefaultExec struct { - *RawExec - version.PluginDecoder -} - -// DefaultExec implements the Exec interface -var _ Exec = &DefaultExec{} - -var defaultExec = &DefaultExec{ - RawExec: &RawExec{Stderr: os.Stderr}, -} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/find.go b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go deleted file mode 100644 index e815404c..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/find.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package invoke - -import ( - "fmt" - "os" - "path/filepath" -) - -// FindInPath returns the full path of the plugin by searching in the provided path -func FindInPath(plugin string, paths []string) (string, error) { - if plugin == "" { - return "", fmt.Errorf("no plugin name provided") - } - - if len(paths) == 0 { - return "", fmt.Errorf("no paths provided") - } - - for _, path := range paths { - for _, fe := range ExecutableFileExtensions { - fullpath := filepath.Join(path, plugin) + fe - if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { - return fullpath, nil - } - } - } - - return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths) -} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go deleted file mode 100644 index 9bcfb455..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package invoke - -// Valid file extensions for plugin executables. -var ExecutableFileExtensions = []string{""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go deleted file mode 100644 index 7665125b..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package invoke - -// Valid file extensions for plugin executables. -var ExecutableFileExtensions = []string{".exe", ""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go deleted file mode 100644 index ad8498ba..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package invoke - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "os/exec" - - "github.com/containernetworking/cni/pkg/types" -) - -type RawExec struct { - Stderr io.Writer -} - -func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) { - stdout := &bytes.Buffer{} - c := exec.CommandContext(ctx, pluginPath) - c.Env = environ - c.Stdin = bytes.NewBuffer(stdinData) - c.Stdout = stdout - c.Stderr = e.Stderr - if err := c.Run(); err != nil { - return nil, pluginErr(err, stdout.Bytes()) - } - - return stdout.Bytes(), nil -} - -func pluginErr(err error, output []byte) error { - if _, ok := err.(*exec.ExitError); ok { - emsg := types.Error{} - if len(output) == 0 { - emsg.Msg = "netplugin failed with no error message" - } else if perr := json.Unmarshal(output, &emsg); perr != nil { - emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(output), perr) - } - return &emsg - } - - return err -} - -func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) { - return FindInPath(plugin, paths) -} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go deleted file mode 100644 index 53256167..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types020 - -import ( - "encoding/json" - "fmt" - "io" - "net" - "os" - - "github.com/containernetworking/cni/pkg/types" -) - -const ImplementedSpecVersion string = "0.2.0" - -var SupportedVersions = []string{"", "0.1.0", ImplementedSpecVersion} - -// Compatibility types for CNI version 0.1.0 and 0.2.0 - -func NewResult(data []byte) (types.Result, error) { - result := &Result{} - if err := json.Unmarshal(data, result); err != nil { - return nil, err - } - return result, nil -} - -func GetResult(r types.Result) (*Result, error) { - // We expect version 0.1.0/0.2.0 results - result020, err := r.GetAsVersion(ImplementedSpecVersion) - if err != nil { - return nil, err - } - result, ok := result020.(*Result) - if !ok { - return nil, fmt.Errorf("failed to convert result") - } - return result, nil -} - -// Result is what gets returned from the plugin (via stdout) to the caller -type Result struct { - CNIVersion string `json:"cniVersion,omitempty"` - IP4 *IPConfig `json:"ip4,omitempty"` - IP6 *IPConfig `json:"ip6,omitempty"` - DNS types.DNS `json:"dns,omitempty"` -} - -func (r *Result) Version() string { - return ImplementedSpecVersion -} - -func (r *Result) GetAsVersion(version string) (types.Result, error) { - for _, supportedVersion := range SupportedVersions { - if version == supportedVersion { - r.CNIVersion = version - return r, nil - } - } - return nil, fmt.Errorf("cannot convert version %q to %s", SupportedVersions, version) -} - -func (r *Result) Print() error { - return r.PrintTo(os.Stdout) -} - -func (r *Result) PrintTo(writer io.Writer) error { - data, err := json.MarshalIndent(r, "", " ") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// String returns a formatted string in the form of "[IP4: $1,][ IP6: $2,] DNS: $3" where -// $1 represents the receiver's IPv4, $2 represents the receiver's IPv6 and $3 the -// receiver's DNS. If $1 or $2 are nil, they won't be present in the returned string. -func (r *Result) String() string { - var str string - if r.IP4 != nil { - str = fmt.Sprintf("IP4:%+v, ", *r.IP4) - } - if r.IP6 != nil { - str += fmt.Sprintf("IP6:%+v, ", *r.IP6) - } - return fmt.Sprintf("%sDNS:%+v", str, r.DNS) -} - -// IPConfig contains values necessary to configure an interface -type IPConfig struct { - IP net.IPNet - Gateway net.IP - Routes []types.Route -} - -// net.IPNet is not JSON (un)marshallable so this duality is needed -// for our custom IPNet type - -// JSON (un)marshallable types -type ipConfig struct { - IP types.IPNet `json:"ip"` - Gateway net.IP `json:"gateway,omitempty"` - Routes []types.Route `json:"routes,omitempty"` -} - -func (c *IPConfig) MarshalJSON() ([]byte, error) { - ipc := ipConfig{ - IP: types.IPNet(c.IP), - Gateway: c.Gateway, - Routes: c.Routes, - } - - return json.Marshal(ipc) -} - -func (c *IPConfig) UnmarshalJSON(data []byte) error { - ipc := ipConfig{} - if err := json.Unmarshal(data, &ipc); err != nil { - return err - } - - c.IP = net.IPNet(ipc.IP) - c.Gateway = ipc.Gateway - c.Routes = ipc.Routes - return nil -} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go deleted file mode 100644 index bd8640fc..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/types/args.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "encoding" - "fmt" - "reflect" - "strings" -) - -// UnmarshallableBool typedef for builtin bool -// because builtin type's methods can't be declared -type UnmarshallableBool bool - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// Returns boolean true if the string is "1" or "[Tt]rue" -// Returns boolean false if the string is "0" or "[Ff]alse" -func (b *UnmarshallableBool) UnmarshalText(data []byte) error { - s := strings.ToLower(string(data)) - switch s { - case "1", "true": - *b = true - case "0", "false": - *b = false - default: - return fmt.Errorf("Boolean unmarshal error: invalid input %s", s) - } - return nil -} - -// UnmarshallableString typedef for builtin string -type UnmarshallableString string - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// Returns the string -func (s *UnmarshallableString) UnmarshalText(data []byte) error { - *s = UnmarshallableString(data) - return nil -} - -// CommonArgs contains the IgnoreUnknown argument -// and must be embedded by all Arg structs -type CommonArgs struct { - IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"` -} - -// GetKeyField is a helper function to receive Values -// Values that represent a pointer to a struct -func GetKeyField(keyString string, v reflect.Value) reflect.Value { - return v.Elem().FieldByName(keyString) -} - -// UnmarshalableArgsError is used to indicate error unmarshalling args -// from the args-string in the form "K=V;K2=V2;..." -type UnmarshalableArgsError struct { - error -} - -// LoadArgs parses args from a string in the form "K=V;K2=V2;..." 
-func LoadArgs(args string, container interface{}) error { - if args == "" { - return nil - } - - containerValue := reflect.ValueOf(container) - - pairs := strings.Split(args, ";") - unknownArgs := []string{} - for _, pair := range pairs { - kv := strings.Split(pair, "=") - if len(kv) != 2 { - return fmt.Errorf("ARGS: invalid pair %q", pair) - } - keyString := kv[0] - valueString := kv[1] - keyField := GetKeyField(keyString, containerValue) - if !keyField.IsValid() { - unknownArgs = append(unknownArgs, pair) - continue - } - keyFieldIface := keyField.Addr().Interface() - u, ok := keyFieldIface.(encoding.TextUnmarshaler) - if !ok { - return UnmarshalableArgsError{fmt.Errorf( - "ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler", - keyString, reflect.TypeOf(keyFieldIface))} - } - err := u.UnmarshalText([]byte(valueString)) - if err != nil { - return fmt.Errorf("ARGS: error parsing value of pair %q: %v)", pair, err) - } - } - - isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool() - if len(unknownArgs) > 0 && !isIgnoreUnknown { - return fmt.Errorf("ARGS: unknown args %q", unknownArgs) - } - return nil -} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go deleted file mode 100644 index 7267a2e6..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
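// Editor's note: illustrative sketch only, not part of the deleted upstream
// file. It shows the intended use of LoadArgs above: decoding a CNI_ARGS-style
// "K=V;K2=V2" string into a struct that embeds CommonArgs. The EnvArgs type
// and the argument string are hypothetical.
package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/types"
)

type EnvArgs struct {
	types.CommonArgs
	IP types.UnmarshallableString
}

func main() {
	var args EnvArgs
	// The unknown key "Foo" is tolerated only because IgnoreUnknown=true.
	if err := types.LoadArgs("IgnoreUnknown=true;IP=10.1.2.3;Foo=bar", &args); err != nil {
		panic(err)
	}
	fmt.Println(args.IP)
}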
- -package current - -import ( - "encoding/json" - "fmt" - "io" - "net" - "os" - - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/types/020" -) - -const ImplementedSpecVersion string = "0.4.0" - -var SupportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion} - -func NewResult(data []byte) (types.Result, error) { - result := &Result{} - if err := json.Unmarshal(data, result); err != nil { - return nil, err - } - return result, nil -} - -func GetResult(r types.Result) (*Result, error) { - resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) - if err != nil { - return nil, err - } - result, ok := resultCurrent.(*Result) - if !ok { - return nil, fmt.Errorf("failed to convert result") - } - return result, nil -} - -var resultConverters = []struct { - versions []string - convert func(types.Result) (*Result, error) -}{ - {types020.SupportedVersions, convertFrom020}, - {SupportedVersions, convertFrom030}, -} - -func convertFrom020(result types.Result) (*Result, error) { - oldResult, err := types020.GetResult(result) - if err != nil { - return nil, err - } - - newResult := &Result{ - CNIVersion: ImplementedSpecVersion, - DNS: oldResult.DNS, - Routes: []*types.Route{}, - } - - if oldResult.IP4 != nil { - newResult.IPs = append(newResult.IPs, &IPConfig{ - Version: "4", - Address: oldResult.IP4.IP, - Gateway: oldResult.IP4.Gateway, - }) - for _, route := range oldResult.IP4.Routes { - newResult.Routes = append(newResult.Routes, &types.Route{ - Dst: route.Dst, - GW: route.GW, - }) - } - } - - if oldResult.IP6 != nil { - newResult.IPs = append(newResult.IPs, &IPConfig{ - Version: "6", - Address: oldResult.IP6.IP, - Gateway: oldResult.IP6.Gateway, - }) - for _, route := range oldResult.IP6.Routes { - newResult.Routes = append(newResult.Routes, &types.Route{ - Dst: route.Dst, - GW: route.GW, - }) - } - } - - return newResult, nil -} - -func convertFrom030(result types.Result) (*Result, error) { - newResult, ok := result.(*Result) - if !ok { - return nil, fmt.Errorf("failed to convert result") - } - newResult.CNIVersion = ImplementedSpecVersion - return newResult, nil -} - -func NewResultFromResult(result types.Result) (*Result, error) { - version := result.Version() - for _, converter := range resultConverters { - for _, supportedVersion := range converter.versions { - if version == supportedVersion { - return converter.convert(result) - } - } - } - return nil, fmt.Errorf("unsupported CNI result22 version %q", version) -} - -// Result is what gets returned from the plugin (via stdout) to the caller -type Result struct { - CNIVersion string `json:"cniVersion,omitempty"` - Interfaces []*Interface `json:"interfaces,omitempty"` - IPs []*IPConfig `json:"ips,omitempty"` - Routes []*types.Route `json:"routes,omitempty"` - DNS types.DNS `json:"dns,omitempty"` -} - -// Convert to the older 0.2.0 CNI spec Result type -func (r *Result) convertTo020() (*types020.Result, error) { - oldResult := &types020.Result{ - CNIVersion: types020.ImplementedSpecVersion, - DNS: r.DNS, - } - - for _, ip := range r.IPs { - // Only convert the first IP address of each version as 0.2.0 - // and earlier cannot handle multiple IP addresses - if ip.Version == "4" && oldResult.IP4 == nil { - oldResult.IP4 = &types020.IPConfig{ - IP: ip.Address, - Gateway: ip.Gateway, - } - } else if ip.Version == "6" && oldResult.IP6 == nil { - oldResult.IP6 = &types020.IPConfig{ - IP: ip.Address, - Gateway: ip.Gateway, - } - } - - if oldResult.IP4 != nil && oldResult.IP6 != nil { - break - } - 
} - - for _, route := range r.Routes { - is4 := route.Dst.IP.To4() != nil - if is4 && oldResult.IP4 != nil { - oldResult.IP4.Routes = append(oldResult.IP4.Routes, types.Route{ - Dst: route.Dst, - GW: route.GW, - }) - } else if !is4 && oldResult.IP6 != nil { - oldResult.IP6.Routes = append(oldResult.IP6.Routes, types.Route{ - Dst: route.Dst, - GW: route.GW, - }) - } - } - - if oldResult.IP4 == nil && oldResult.IP6 == nil { - return nil, fmt.Errorf("cannot convert: no valid IP addresses") - } - - return oldResult, nil -} - -func (r *Result) Version() string { - return ImplementedSpecVersion -} - -func (r *Result) GetAsVersion(version string) (types.Result, error) { - switch version { - case "0.3.0", "0.3.1", ImplementedSpecVersion: - r.CNIVersion = version - return r, nil - case types020.SupportedVersions[0], types020.SupportedVersions[1], types020.SupportedVersions[2]: - return r.convertTo020() - } - return nil, fmt.Errorf("cannot convert version 0.3.x to %q", version) -} - -func (r *Result) Print() error { - return r.PrintTo(os.Stdout) -} - -func (r *Result) PrintTo(writer io.Writer) error { - data, err := json.MarshalIndent(r, "", " ") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// String returns a formatted string in the form of "[Interfaces: $1,][ IP: $2,] DNS: $3" where -// $1 represents the receiver's Interfaces, $2 represents the receiver's IP addresses and $3 the -// receiver's DNS. If $1 or $2 are nil, they won't be present in the returned string. -func (r *Result) String() string { - var str string - if len(r.Interfaces) > 0 { - str += fmt.Sprintf("Interfaces:%+v, ", r.Interfaces) - } - if len(r.IPs) > 0 { - str += fmt.Sprintf("IP:%+v, ", r.IPs) - } - if len(r.Routes) > 0 { - str += fmt.Sprintf("Routes:%+v, ", r.Routes) - } - return fmt.Sprintf("%sDNS:%+v", str, r.DNS) -} - -// Convert this old version result to the current CNI version result -func (r *Result) Convert() (*Result, error) { - return r, nil -} - -// Interface contains values about the created interfaces -type Interface struct { - Name string `json:"name"` - Mac string `json:"mac,omitempty"` - Sandbox string `json:"sandbox,omitempty"` -} - -func (i *Interface) String() string { - return fmt.Sprintf("%+v", *i) -} - -// Int returns a pointer to the int value passed in. Used to -// set the IPConfig.Interface field. 
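// Editor's note: illustrative sketch, not part of the deleted upstream file.
// It exercises the GetAsVersion/convertTo020 path above by down-converting a
// 0.4.0 result with a single IPv4 address to the legacy 0.2.0 shape. The
// addresses are hypothetical.
package main

import (
	"fmt"
	"net"

	"github.com/containernetworking/cni/pkg/types/current"
)

func main() {
	ip, ipnet, err := net.ParseCIDR("10.1.2.3/24")
	if err != nil {
		panic(err)
	}
	ipnet.IP = ip // keep the host address, as ParseCIDR in pkg/types does

	res := &current.Result{
		CNIVersion: "0.4.0",
		IPs: []*current.IPConfig{{
			Version: "4",
			Address: *ipnet,
			Gateway: net.ParseIP("10.1.2.1"),
		}},
	}
	old, err := res.GetAsVersion("0.2.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(old.Version()) // "0.2.0"
}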
-func Int(v int) *int { - return &v -} - -// IPConfig contains values necessary to configure an IP address on an interface -type IPConfig struct { - // IP version, either "4" or "6" - Version string - // Index into Result structs Interfaces list - Interface *int - Address net.IPNet - Gateway net.IP -} - -func (i *IPConfig) String() string { - return fmt.Sprintf("%+v", *i) -} - -// JSON (un)marshallable types -type ipConfig struct { - Version string `json:"version"` - Interface *int `json:"interface,omitempty"` - Address types.IPNet `json:"address"` - Gateway net.IP `json:"gateway,omitempty"` -} - -func (c *IPConfig) MarshalJSON() ([]byte, error) { - ipc := ipConfig{ - Version: c.Version, - Interface: c.Interface, - Address: types.IPNet(c.Address), - Gateway: c.Gateway, - } - - return json.Marshal(ipc) -} - -func (c *IPConfig) UnmarshalJSON(data []byte) error { - ipc := ipConfig{} - if err := json.Unmarshal(data, &ipc); err != nil { - return err - } - - c.Version = ipc.Version - c.Interface = ipc.Interface - c.Address = net.IPNet(ipc.Address) - c.Gateway = ipc.Gateway - return nil -} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go deleted file mode 100644 index d0d11006..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/types/types.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2015 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net" - "os" -) - -// like net.IPNet but adds JSON marshalling and unmarshalling -type IPNet net.IPNet - -// ParseCIDR takes a string like "10.2.3.1/24" and -// return IPNet with "10.2.3.1" and /24 mask -func ParseCIDR(s string) (*net.IPNet, error) { - ip, ipn, err := net.ParseCIDR(s) - if err != nil { - return nil, err - } - - ipn.IP = ip - return ipn, nil -} - -func (n IPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(&n).String()) -} - -func (n *IPNet) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - - tmp, err := ParseCIDR(s) - if err != nil { - return err - } - - *n = IPNet(*tmp) - return nil -} - -// NetConf describes a network. -type NetConf struct { - CNIVersion string `json:"cniVersion,omitempty"` - - Name string `json:"name,omitempty"` - Type string `json:"type,omitempty"` - Capabilities map[string]bool `json:"capabilities,omitempty"` - IPAM IPAM `json:"ipam,omitempty"` - DNS DNS `json:"dns"` - - RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` - PrevResult Result `json:"-"` -} - -type IPAM struct { - Type string `json:"type,omitempty"` -} - -// NetConfList describes an ordered list of networks. 
-type NetConfList struct { - CNIVersion string `json:"cniVersion,omitempty"` - - Name string `json:"name,omitempty"` - DisableCheck bool `json:"disableCheck,omitempty"` - Plugins []*NetConf `json:"plugins,omitempty"` -} - -type ResultFactoryFunc func([]byte) (Result, error) - -// Result is an interface that provides the result of plugin execution -type Result interface { - // The highest CNI specification result version the result supports - // without having to convert - Version() string - - // Returns the result converted into the requested CNI specification - // result version, or an error if conversion failed - GetAsVersion(version string) (Result, error) - - // Prints the result in JSON format to stdout - Print() error - - // Prints the result in JSON format to provided writer - PrintTo(writer io.Writer) error - - // Returns a JSON string representation of the result - String() string -} - -func PrintResult(result Result, version string) error { - newResult, err := result.GetAsVersion(version) - if err != nil { - return err - } - return newResult.Print() -} - -// DNS contains values interesting for DNS resolvers -type DNS struct { - Nameservers []string `json:"nameservers,omitempty"` - Domain string `json:"domain,omitempty"` - Search []string `json:"search,omitempty"` - Options []string `json:"options,omitempty"` -} - -type Route struct { - Dst net.IPNet - GW net.IP -} - -func (r *Route) String() string { - return fmt.Sprintf("%+v", *r) -} - -// Well known error codes -// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes -const ( - ErrUnknown uint = iota // 0 - ErrIncompatibleCNIVersion // 1 - ErrUnsupportedField // 2 -) - -type Error struct { - Code uint `json:"code"` - Msg string `json:"msg"` - Details string `json:"details,omitempty"` -} - -func (e *Error) Error() string { - details := "" - if e.Details != "" { - details = fmt.Sprintf("; %v", e.Details) - } - return fmt.Sprintf("%v%v", e.Msg, details) -} - -func (e *Error) Print() error { - return prettyPrint(e) -} - -// net.IPNet is not JSON (un)marshallable so this duality is needed -// for our custom IPNet type - -// JSON (un)marshallable types -type route struct { - Dst IPNet `json:"dst"` - GW net.IP `json:"gw,omitempty"` -} - -func (r *Route) UnmarshalJSON(data []byte) error { - rt := route{} - if err := json.Unmarshal(data, &rt); err != nil { - return err - } - - r.Dst = net.IPNet(rt.Dst) - r.GW = rt.GW - return nil -} - -func (r Route) MarshalJSON() ([]byte, error) { - rt := route{ - Dst: IPNet(r.Dst), - GW: r.GW, - } - - return json.Marshal(rt) -} - -func prettyPrint(obj interface{}) error { - data, err := json.MarshalIndent(obj, "", " ") - if err != nil { - return err - } - _, err = os.Stdout.Write(data) - return err -} - -// NotImplementedError is used to indicate that a method is not implemented for the given platform -var NotImplementedError = errors.New("Not Implemented") diff --git a/vendor/github.com/containernetworking/cni/pkg/version/conf.go b/vendor/github.com/containernetworking/cni/pkg/version/conf.go deleted file mode 100644 index 3cca58bb..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/version/conf.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
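// Editor's note: illustrative sketch, not part of the deleted upstream file.
// It shows the Route JSON duality defined above: Dst is marshalled as a CIDR
// string through the custom IPNet type. The route values are hypothetical.
package main

import (
	"encoding/json"
	"fmt"
	"net"

	"github.com/containernetworking/cni/pkg/types"
)

func main() {
	_, dst, err := net.ParseCIDR("0.0.0.0/0")
	if err != nil {
		panic(err)
	}
	r := types.Route{Dst: *dst, GW: net.ParseIP("10.1.2.1")}
	out, err := json.Marshal(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"dst":"0.0.0.0/0","gw":"10.1.2.1"}
}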
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import ( - "encoding/json" - "fmt" -) - -// ConfigDecoder can decode the CNI version available in network config data -type ConfigDecoder struct{} - -func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) { - var conf struct { - CNIVersion string `json:"cniVersion"` - } - err := json.Unmarshal(jsonBytes, &conf) - if err != nil { - return "", fmt.Errorf("decoding version from network config: %s", err) - } - if conf.CNIVersion == "" { - return "0.1.0", nil - } - return conf.CNIVersion, nil -} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go deleted file mode 100644 index 1df42724..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import ( - "encoding/json" - "fmt" - "io" - "strconv" - "strings" -) - -// PluginInfo reports information about CNI versioning -type PluginInfo interface { - // SupportedVersions returns one or more CNI spec versions that the plugin - // supports. 
If input is provided in one of these versions, then the plugin - // promises to use the same CNI version in its response - SupportedVersions() []string - - // Encode writes this CNI version information as JSON to the given Writer - Encode(io.Writer) error -} - -type pluginInfo struct { - CNIVersion_ string `json:"cniVersion"` - SupportedVersions_ []string `json:"supportedVersions,omitempty"` -} - -// pluginInfo implements the PluginInfo interface -var _ PluginInfo = &pluginInfo{} - -func (p *pluginInfo) Encode(w io.Writer) error { - return json.NewEncoder(w).Encode(p) -} - -func (p *pluginInfo) SupportedVersions() []string { - return p.SupportedVersions_ -} - -// PluginSupports returns a new PluginInfo that will report the given versions -// as supported -func PluginSupports(supportedVersions ...string) PluginInfo { - if len(supportedVersions) < 1 { - panic("programmer error: you must support at least one version") - } - return &pluginInfo{ - CNIVersion_: Current(), - SupportedVersions_: supportedVersions, - } -} - -// PluginDecoder can decode the response returned by a plugin's VERSION command -type PluginDecoder struct{} - -func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { - var info pluginInfo - err := json.Unmarshal(jsonBytes, &info) - if err != nil { - return nil, fmt.Errorf("decoding version info: %s", err) - } - if info.CNIVersion_ == "" { - return nil, fmt.Errorf("decoding version info: missing field cniVersion") - } - if len(info.SupportedVersions_) == 0 { - if info.CNIVersion_ == "0.2.0" { - return PluginSupports("0.1.0", "0.2.0"), nil - } - return nil, fmt.Errorf("decoding version info: missing field supportedVersions") - } - return &info, nil -} - -// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major, -// minor, and micro numbers or returns an error -func ParseVersion(version string) (int, int, int, error) { - var major, minor, micro int - if version == "" { - return -1, -1, -1, fmt.Errorf("invalid version %q: the version is empty", version) - } - - parts := strings.Split(version, ".") - if len(parts) >= 4 { - return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version) - } - - major, err := strconv.Atoi(parts[0]) - if err != nil { - return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %v", parts[0], err) - } - - if len(parts) >= 2 { - minor, err = strconv.Atoi(parts[1]) - if err != nil { - return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %v", parts[1], err) - } - } - - if len(parts) >= 3 { - micro, err = strconv.Atoi(parts[2]) - if err != nil { - return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %v", parts[2], err) - } - } - - return major, minor, micro, nil -} - -// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro -// numbers, and compares them to determine whether the first version is greater -// than or equal to the second -func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { - firstMajor, firstMinor, firstMicro, err := ParseVersion(version) - if err != nil { - return false, err - } - - secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) - if err != nil { - return false, err - } - - if firstMajor > secondMajor { - return true, nil - } else if firstMajor == secondMajor { - if firstMinor > secondMinor { - return true, nil - } else if firstMinor == secondMinor && firstMicro >= secondMicro { - return true, nil - } - } - return false, nil -} diff --git 
a/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go deleted file mode 100644 index 25c3810b..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import "fmt" - -type ErrorIncompatible struct { - Config string - Supported []string -} - -func (e *ErrorIncompatible) Details() string { - return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported) -} - -func (e *ErrorIncompatible) Error() string { - return fmt.Sprintf("incompatible CNI versions: %s", e.Details()) -} - -type Reconciler struct{} - -func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible { - return r.CheckRaw(configVersion, pluginInfo.SupportedVersions()) -} - -func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible { - for _, supportedVersion := range supportedVersions { - if configVersion == supportedVersion { - return nil - } - } - - return &ErrorIncompatible{ - Config: configVersion, - Supported: supportedVersions, - } -} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go deleted file mode 100644 index 8f3508e6..00000000 --- a/vendor/github.com/containernetworking/cni/pkg/version/version.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2016 CNI authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import ( - "encoding/json" - "fmt" - - "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/cni/pkg/types/020" - "github.com/containernetworking/cni/pkg/types/current" -) - -// Current reports the version of the CNI spec implemented by this library -func Current() string { - return "0.4.0" -} - -// Legacy PluginInfo describes a plugin that is backwards compatible with the -// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0 -// library ought to work correctly with a plugin that reports support for -// Legacy versions. -// -// Any future CNI spec versions which meet this definition should be added to -// this list. 
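// Editor's note: illustrative sketch, not part of the deleted upstream file.
// It shows the intended use of ParseVersion and GreaterThanOrEqualTo above,
// e.g. for gating behaviour on a config's cniVersion. The version strings are
// hypothetical.
package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	major, minor, micro, err := version.ParseVersion("0.4.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(major, minor, micro) // 0 4 0

	ok, err := version.GreaterThanOrEqualTo("0.4.0", "0.3.1")
	if err != nil {
		panic(err)
	}
	fmt.Println(ok) // true
}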
-var Legacy = PluginSupports("0.1.0", "0.2.0") -var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0") - -var resultFactories = []struct { - supportedVersions []string - newResult types.ResultFactoryFunc -}{ - {current.SupportedVersions, current.NewResult}, - {types020.SupportedVersions, types020.NewResult}, -} - -// Finds a Result object matching the requested version (if any) and asks -// that object to parse the plugin result, returning an error if parsing failed. -func NewResult(version string, resultBytes []byte) (types.Result, error) { - reconciler := &Reconciler{} - for _, resultFactory := range resultFactories { - err := reconciler.CheckRaw(version, resultFactory.supportedVersions) - if err == nil { - // Result supports this version - return resultFactory.newResult(resultBytes) - } - } - - return nil, fmt.Errorf("unsupported CNI result version %q", version) -} - -// ParsePrevResult parses a prevResult in a NetConf structure and sets -// the NetConf's PrevResult member to the parsed Result object. -func ParsePrevResult(conf *types.NetConf) error { - if conf.RawPrevResult == nil { - return nil - } - - resultBytes, err := json.Marshal(conf.RawPrevResult) - if err != nil { - return fmt.Errorf("could not serialize prevResult: %v", err) - } - - conf.RawPrevResult = nil - conf.PrevResult, err = NewResult(conf.CNIVersion, resultBytes) - if err != nil { - return fmt.Errorf("could not parse prevResult: %v", err) - } - - return nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go deleted file mode 100644 index 57f224af..00000000 --- a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go +++ /dev/null @@ -1,64 +0,0 @@ -package dockerignore // import "github.com/docker/docker/builder/dockerignore" - -import ( - "bufio" - "bytes" - "fmt" - "io" - "path/filepath" - "strings" -) - -// ReadAll reads a .dockerignore file and returns the list of file patterns -// to ignore. Note this will trim whitespace from each line as well -// as use GO's "clean" func to get the shortest/cleanest path for each. -func ReadAll(reader io.Reader) ([]string, error) { - if reader == nil { - return nil, nil - } - - scanner := bufio.NewScanner(reader) - var excludes []string - currentLine := 0 - - utf8bom := []byte{0xEF, 0xBB, 0xBF} - for scanner.Scan() { - scannedBytes := scanner.Bytes() - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - pattern := string(scannedBytes) - currentLine++ - // Lines starting with # (comments) are ignored before processing - if strings.HasPrefix(pattern, "#") { - continue - } - pattern = strings.TrimSpace(pattern) - if pattern == "" { - continue - } - // normalize absolute paths to paths relative to the context - // (taking care of '!' prefix) - invert := pattern[0] == '!' - if invert { - pattern = strings.TrimSpace(pattern[1:]) - } - if len(pattern) > 0 { - pattern = filepath.Clean(pattern) - pattern = filepath.ToSlash(pattern) - if len(pattern) > 1 && pattern[0] == '/' { - pattern = pattern[1:] - } - } - if invert { - pattern = "!" 
+ pattern - } - - excludes = append(excludes, pattern) - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("Error reading .dockerignore: %v", err) - } - return excludes, nil -} diff --git a/vendor/github.com/docker/docker/pkg/signal/README.md b/vendor/github.com/docker/docker/pkg/signal/README.md deleted file mode 100644 index 2b237a59..00000000 --- a/vendor/github.com/docker/docker/pkg/signal/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file diff --git a/vendor/github.com/docker/docker/pkg/signal/signal.go b/vendor/github.com/docker/docker/pkg/signal/signal.go deleted file mode 100644 index 88ef7b5e..00000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal.go +++ /dev/null @@ -1,54 +0,0 @@ -// Package signal provides helper functions for dealing with signals across -// various operating systems. -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "fmt" - "os" - "os/signal" - "strconv" - "strings" - "syscall" -) - -// CatchAll catches all signals and relays them to the specified channel. -func CatchAll(sigc chan os.Signal) { - var handledSigs []os.Signal - for _, s := range SignalMap { - handledSigs = append(handledSigs, s) - } - signal.Notify(sigc, handledSigs...) -} - -// StopCatch stops catching the signals and closes the specified channel. -func StopCatch(sigc chan os.Signal) { - signal.Stop(sigc) - close(sigc) -} - -// ParseSignal translates a string to a valid syscall signal. -// It returns an error if the signal map doesn't include the given signal. -func ParseSignal(rawSignal string) (syscall.Signal, error) { - s, err := strconv.Atoi(rawSignal) - if err == nil { - if s == 0 { - return -1, fmt.Errorf("Invalid signal: %s", rawSignal) - } - return syscall.Signal(s), nil - } - signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] - if !ok { - return -1, fmt.Errorf("Invalid signal: %s", rawSignal) - } - return signal, nil -} - -// ValidSignalForPlatform returns true if a signal is valid on the platform -func ValidSignalForPlatform(sig syscall.Signal) bool { - for _, v := range SignalMap { - if v == sig { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go deleted file mode 100644 index ee5501e3..00000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go +++ /dev/null @@ -1,41 +0,0 @@ -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "syscall" -) - -// SignalMap is a map of Darwin signals. 
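// Editor's note: illustrative sketch, not part of the deleted upstream file.
// It shows how ParseSignal above resolves either a numeric signal or a name
// (with or without the "SIG" prefix) against the per-platform SignalMap.
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/signal"
)

func main() {
	for _, raw := range []string{"9", "KILL", "SIGTERM"} {
		sig, err := signal.ParseSignal(raw)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %d\n", raw, sig)
	}
}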
-var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUG": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INFO": syscall.SIGINFO, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go deleted file mode 100644 index 764f90e2..00000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go +++ /dev/null @@ -1,43 +0,0 @@ -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "syscall" -) - -// SignalMap is a map of FreeBSD signals. -var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUF": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INFO": syscall.SIGINFO, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "LWP": syscall.SIGLWP, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "THR": syscall.SIGTHR, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go deleted file mode 100644 index 4013bded..00000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go +++ /dev/null @@ -1,83 +0,0 @@ -// +build !mips,!mipsle,!mips64,!mips64le - -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -const ( - sigrtmin = 34 - sigrtmax = 64 -) - -// SignalMap is a map of Linux signals. 
-var SignalMap = map[string]syscall.Signal{ - "ABRT": unix.SIGABRT, - "ALRM": unix.SIGALRM, - "BUS": unix.SIGBUS, - "CHLD": unix.SIGCHLD, - "CLD": unix.SIGCLD, - "CONT": unix.SIGCONT, - "FPE": unix.SIGFPE, - "HUP": unix.SIGHUP, - "ILL": unix.SIGILL, - "INT": unix.SIGINT, - "IO": unix.SIGIO, - "IOT": unix.SIGIOT, - "KILL": unix.SIGKILL, - "PIPE": unix.SIGPIPE, - "POLL": unix.SIGPOLL, - "PROF": unix.SIGPROF, - "PWR": unix.SIGPWR, - "QUIT": unix.SIGQUIT, - "SEGV": unix.SIGSEGV, - "STKFLT": unix.SIGSTKFLT, - "STOP": unix.SIGSTOP, - "SYS": unix.SIGSYS, - "TERM": unix.SIGTERM, - "TRAP": unix.SIGTRAP, - "TSTP": unix.SIGTSTP, - "TTIN": unix.SIGTTIN, - "TTOU": unix.SIGTTOU, - "URG": unix.SIGURG, - "USR1": unix.SIGUSR1, - "USR2": unix.SIGUSR2, - "VTALRM": unix.SIGVTALRM, - "WINCH": unix.SIGWINCH, - "XCPU": unix.SIGXCPU, - "XFSZ": unix.SIGXFSZ, - "RTMIN": sigrtmin, - "RTMIN+1": sigrtmin + 1, - "RTMIN+2": sigrtmin + 2, - "RTMIN+3": sigrtmin + 3, - "RTMIN+4": sigrtmin + 4, - "RTMIN+5": sigrtmin + 5, - "RTMIN+6": sigrtmin + 6, - "RTMIN+7": sigrtmin + 7, - "RTMIN+8": sigrtmin + 8, - "RTMIN+9": sigrtmin + 9, - "RTMIN+10": sigrtmin + 10, - "RTMIN+11": sigrtmin + 11, - "RTMIN+12": sigrtmin + 12, - "RTMIN+13": sigrtmin + 13, - "RTMIN+14": sigrtmin + 14, - "RTMIN+15": sigrtmin + 15, - "RTMAX-14": sigrtmax - 14, - "RTMAX-13": sigrtmax - 13, - "RTMAX-12": sigrtmax - 12, - "RTMAX-11": sigrtmax - 11, - "RTMAX-10": sigrtmax - 10, - "RTMAX-9": sigrtmax - 9, - "RTMAX-8": sigrtmax - 8, - "RTMAX-7": sigrtmax - 7, - "RTMAX-6": sigrtmax - 6, - "RTMAX-5": sigrtmax - 5, - "RTMAX-4": sigrtmax - 4, - "RTMAX-3": sigrtmax - 3, - "RTMAX-2": sigrtmax - 2, - "RTMAX-1": sigrtmax - 1, - "RTMAX": sigrtmax, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go deleted file mode 100644 index c78c887a..00000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build linux -// +build mips mipsle mips64 mips64le - -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -const ( - sigrtmin = 34 - sigrtmax = 127 -) - -// SignalMap is a map of Linux signals. 
-var SignalMap = map[string]syscall.Signal{ - "ABRT": unix.SIGABRT, - "ALRM": unix.SIGALRM, - "BUS": unix.SIGBUS, - "CHLD": unix.SIGCHLD, - "CLD": unix.SIGCLD, - "CONT": unix.SIGCONT, - "FPE": unix.SIGFPE, - "HUP": unix.SIGHUP, - "ILL": unix.SIGILL, - "INT": unix.SIGINT, - "IO": unix.SIGIO, - "IOT": unix.SIGIOT, - "KILL": unix.SIGKILL, - "PIPE": unix.SIGPIPE, - "POLL": unix.SIGPOLL, - "PROF": unix.SIGPROF, - "PWR": unix.SIGPWR, - "QUIT": unix.SIGQUIT, - "SEGV": unix.SIGSEGV, - "EMT": unix.SIGEMT, - "STOP": unix.SIGSTOP, - "SYS": unix.SIGSYS, - "TERM": unix.SIGTERM, - "TRAP": unix.SIGTRAP, - "TSTP": unix.SIGTSTP, - "TTIN": unix.SIGTTIN, - "TTOU": unix.SIGTTOU, - "URG": unix.SIGURG, - "USR1": unix.SIGUSR1, - "USR2": unix.SIGUSR2, - "VTALRM": unix.SIGVTALRM, - "WINCH": unix.SIGWINCH, - "XCPU": unix.SIGXCPU, - "XFSZ": unix.SIGXFSZ, - "RTMIN": sigrtmin, - "RTMIN+1": sigrtmin + 1, - "RTMIN+2": sigrtmin + 2, - "RTMIN+3": sigrtmin + 3, - "RTMIN+4": sigrtmin + 4, - "RTMIN+5": sigrtmin + 5, - "RTMIN+6": sigrtmin + 6, - "RTMIN+7": sigrtmin + 7, - "RTMIN+8": sigrtmin + 8, - "RTMIN+9": sigrtmin + 9, - "RTMIN+10": sigrtmin + 10, - "RTMIN+11": sigrtmin + 11, - "RTMIN+12": sigrtmin + 12, - "RTMIN+13": sigrtmin + 13, - "RTMIN+14": sigrtmin + 14, - "RTMIN+15": sigrtmin + 15, - "RTMAX-14": sigrtmax - 14, - "RTMAX-13": sigrtmax - 13, - "RTMAX-12": sigrtmax - 12, - "RTMAX-11": sigrtmax - 11, - "RTMAX-10": sigrtmax - 10, - "RTMAX-9": sigrtmax - 9, - "RTMAX-8": sigrtmax - 8, - "RTMAX-7": sigrtmax - 7, - "RTMAX-6": sigrtmax - 6, - "RTMAX-5": sigrtmax - 5, - "RTMAX-4": sigrtmax - 4, - "RTMAX-3": sigrtmax - 3, - "RTMAX-2": sigrtmax - 2, - "RTMAX-1": sigrtmax - 1, - "RTMAX": sigrtmax, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go deleted file mode 100644 index a2aa4248..00000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !windows - -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "syscall" -) - -// Signals used in cli/command (no windows equivalent, use -// invalid signals so they don't get handled) - -const ( - // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted. - SIGCHLD = syscall.SIGCHLD - // SIGWINCH is a signal sent to a process when its controlling terminal changes its size - SIGWINCH = syscall.SIGWINCH - // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading - SIGPIPE = syscall.SIGPIPE - // DefaultStopSignal is the syscall signal used to stop a container in unix systems. - DefaultStopSignal = "SIGTERM" -) diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go deleted file mode 100644 index 1fd25a83..00000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !linux,!darwin,!freebsd,!windows - -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "syscall" -) - -// SignalMap is an empty map of signals for unsupported platform. 
-var SignalMap = map[string]syscall.Signal{} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go deleted file mode 100644 index 65752f24..00000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "syscall" -) - -// Signals used in cli/command (no windows equivalent, use -// invalid signals so they don't get handled) -const ( - SIGCHLD = syscall.Signal(0xff) - SIGWINCH = syscall.Signal(0xff) - SIGPIPE = syscall.Signal(0xff) - // DefaultStopSignal is the syscall signal used to stop a container in windows systems. - DefaultStopSignal = "15" -) - -// SignalMap is a map of "supported" signals. As per the comment in GOLang's -// ztypes_windows.go: "More invented values for signals". Windows doesn't -// really support signals in any way, shape or form that Unix does. -// -// We have these so that docker kill can be used to gracefully (TERM) and -// forcibly (KILL) terminate a container on Windows. -var SignalMap = map[string]syscall.Signal{ - "KILL": syscall.SIGKILL, - "TERM": syscall.SIGTERM, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/trap.go b/vendor/github.com/docker/docker/pkg/signal/trap.go deleted file mode 100644 index a277b956..00000000 --- a/vendor/github.com/docker/docker/pkg/signal/trap.go +++ /dev/null @@ -1,104 +0,0 @@ -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "fmt" - "os" - gosignal "os/signal" - "path/filepath" - "runtime" - "strings" - "sync/atomic" - "syscall" - "time" - - "github.com/pkg/errors" -) - -// Trap sets up a simplified signal "trap", appropriate for common -// behavior expected from a vanilla unix command-line tool in general -// (and the Docker engine in particular). -// -// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. -// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is -// skipped and the process is terminated immediately (allows force quit of stuck daemon) -// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. -// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while -// the docker daemon is not restarted and also running under systemd. -// Fixes https://github.com/docker/docker/issues/19728 -// -func Trap(cleanup func(), logger interface { - Info(args ...interface{}) -}) { - c := make(chan os.Signal, 1) - // we will handle INT, TERM, QUIT, SIGPIPE here - signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE} - gosignal.Notify(c, signals...) 
- go func() { - interruptCount := uint32(0) - for sig := range c { - if sig == syscall.SIGPIPE { - continue - } - - go func(sig os.Signal) { - logger.Info(fmt.Sprintf("Processing signal '%v'", sig)) - switch sig { - case os.Interrupt, syscall.SIGTERM: - if atomic.LoadUint32(&interruptCount) < 3 { - // Initiate the cleanup only once - if atomic.AddUint32(&interruptCount, 1) == 1 { - // Call the provided cleanup handler - cleanup() - os.Exit(0) - } else { - return - } - } else { - // 3 SIGTERM/INT signals received; force exit without cleanup - logger.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") - } - case syscall.SIGQUIT: - DumpStacks("") - logger.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT") - } - // for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # - os.Exit(128 + int(sig.(syscall.Signal))) - }(sig) - } - }() -} - -const stacksLogNameTemplate = "goroutine-stacks-%s.log" - -// DumpStacks appends the runtime stack into file in dir and returns full path -// to that file. -func DumpStacks(dir string) (string, error) { - var ( - buf []byte - stackSize int - ) - bufferLen := 16384 - for stackSize == len(buf) { - buf = make([]byte, bufferLen) - stackSize = runtime.Stack(buf, true) - bufferLen *= 2 - } - buf = buf[:stackSize] - var f *os.File - if dir != "" { - path := filepath.Join(dir, fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) - var err error - f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) - if err != nil { - return "", errors.Wrap(err, "failed to open file to write the goroutine stacks") - } - defer f.Close() - defer f.Sync() - } else { - f = os.Stderr - } - if _, err := f.Write(buf); err != nil { - return "", errors.Wrap(err, "failed to write goroutine stacks") - } - return f.Name(), nil -} diff --git a/vendor/github.com/genuinetools/img/client/client.go b/vendor/github.com/genuinetools/img/client/client.go deleted file mode 100644 index 7518c75c..00000000 --- a/vendor/github.com/genuinetools/img/client/client.go +++ /dev/null @@ -1,57 +0,0 @@ -package client - -import ( - "os" - "path/filepath" - - "github.com/containerd/containerd/snapshots/overlay" - "github.com/genuinetools/img/types" - "github.com/moby/buildkit/control" - "github.com/moby/buildkit/session" - "github.com/sirupsen/logrus" -) - -// Client holds the information for the client we will use for communicating -// with the buildkit controller. -type Client struct { - backend string - localDirs map[string]string - root string - - sessionManager *session.Manager - controller *control.Controller -} - -// New returns a new client for communicating with the buildkit controller. -func New(root, backend string, localDirs map[string]string) (*Client, error) { - // Set the name for the directory executor. - name := "runc" - - switch backend { - case types.AutoBackend: - if overlay.Supported(root) == nil { - backend = types.OverlayFSBackend - } else { - backend = types.NativeBackend - } - logrus.Debugf("using backend: %s", backend) - } - - // Create the root/ - root = filepath.Join(root, name, backend) - if err := os.MkdirAll(root, 0700); err != nil { - return nil, err - } - - // Create the start of the client. - return &Client{ - backend: backend, - root: root, - localDirs: localDirs, - }, nil -} - -// Close safely closes the client. -// This used to shut down the FUSE server but since that was removed -// it is basically a no-op now. 
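// Editor's note: illustrative sketch, not part of the deleted upstream file.
// It shows the usual construction of the buildkit-backed client above, relying
// on the backend auto-detection in New. The state directory is hypothetical.
package main

import (
	"log"

	"github.com/genuinetools/img/client"
	"github.com/genuinetools/img/types"
)

func main() {
	c, err := client.New("/tmp/img-state", types.AutoBackend, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
}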
-func (c *Client) Close() {} diff --git a/vendor/github.com/genuinetools/img/client/controller.go b/vendor/github.com/genuinetools/img/client/controller.go deleted file mode 100644 index eb2498b5..00000000 --- a/vendor/github.com/genuinetools/img/client/controller.go +++ /dev/null @@ -1,83 +0,0 @@ -package client - -import ( - "fmt" - "path/filepath" - - "github.com/containerd/containerd/remotes/docker" - "github.com/moby/buildkit/cache/remotecache" - inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline" - localremotecache "github.com/moby/buildkit/cache/remotecache/local" - registryremotecache "github.com/moby/buildkit/cache/remotecache/registry" - "github.com/moby/buildkit/control" - "github.com/moby/buildkit/frontend" - "github.com/moby/buildkit/frontend/dockerfile/builder" - "github.com/moby/buildkit/frontend/gateway" - "github.com/moby/buildkit/frontend/gateway/forwarder" - "github.com/moby/buildkit/solver/bboltcachestorage" - "github.com/moby/buildkit/worker" - "github.com/moby/buildkit/worker/base" -) - -func (c *Client) createController() error { - sm, err := c.getSessionManager() - if err != nil { - return fmt.Errorf("creating session manager failed: %v", err) - } - // Create the worker opts. - opt, err := c.createWorkerOpt(true) - if err != nil { - return fmt.Errorf("creating worker opt failed: %v", err) - } - - // Create the new worker. - w, err := base.NewWorker(opt) - if err != nil { - return fmt.Errorf("creating worker failed: %v", err) - } - - // Create the worker controller. - wc := &worker.Controller{} - if err := wc.Add(w); err != nil { - return fmt.Errorf("adding worker to worker controller failed: %v", err) - } - - // Add the frontends. - frontends := map[string]frontend.Frontend{} - frontends["dockerfile.v0"] = forwarder.NewGatewayForwarder(wc, builder.Build) - frontends["gateway.v0"] = gateway.NewGatewayFrontend(wc) - - // Create the cache storage - cacheStorage, err := bboltcachestorage.NewStore(filepath.Join(c.root, "cache.db")) - if err != nil { - return err - } - - remoteCacheExporterFuncs := map[string]remotecache.ResolveCacheExporterFunc{ - "inline": inlineremotecache.ResolveCacheExporterFunc(), - "local": localremotecache.ResolveCacheExporterFunc(sm), - "registry": registryremotecache.ResolveCacheExporterFunc(sm, docker.ConfigureDefaultRegistries()), - } - remoteCacheImporterFuncs := map[string]remotecache.ResolveCacheImporterFunc{ - "local": localremotecache.ResolveCacheImporterFunc(sm), - "registry": registryremotecache.ResolveCacheImporterFunc(sm, opt.ContentStore, docker.ConfigureDefaultRegistries()), - } - - // Create the controller. - controller, err := control.NewController(control.Opt{ - SessionManager: sm, - WorkerController: wc, - Frontends: frontends, - ResolveCacheExporterFuncs: remoteCacheExporterFuncs, - ResolveCacheImporterFuncs: remoteCacheImporterFuncs, - CacheKeyStorage: cacheStorage, - }) - if err != nil { - return fmt.Errorf("creating new controller failed: %v", err) - } - - // Set the controller for the client. - c.controller = controller - - return nil -} diff --git a/vendor/github.com/genuinetools/img/client/diskusage.go b/vendor/github.com/genuinetools/img/client/diskusage.go deleted file mode 100644 index e33e2a63..00000000 --- a/vendor/github.com/genuinetools/img/client/diskusage.go +++ /dev/null @@ -1,26 +0,0 @@ -package client - -import ( - "context" - "fmt" - - controlapi "github.com/moby/buildkit/api/services/control" -) - -// DiskUsage returns the disk usage being consumed by the buildkit controller. 
-func (c *Client) DiskUsage(ctx context.Context, req *controlapi.DiskUsageRequest) (*controlapi.DiskUsageResponse, error) { - if c.controller == nil { - // Create the controller. - if err := c.createController(); err != nil { - return nil, err - } - } - - // Call diskusage. - resp, err := c.controller.DiskUsage(ctx, req) - if err != nil { - return nil, fmt.Errorf("getting disk usage failed: %v", err) - } - - return resp, nil -} diff --git a/vendor/github.com/genuinetools/img/client/list.go b/vendor/github.com/genuinetools/img/client/list.go deleted file mode 100644 index 311cfe2c..00000000 --- a/vendor/github.com/genuinetools/img/client/list.go +++ /dev/null @@ -1,67 +0,0 @@ -package client - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/containerd/containerd/content/local" - "github.com/containerd/containerd/images" - ctdmetadata "github.com/containerd/containerd/metadata" - "github.com/containerd/containerd/platforms" - bolt "go.etcd.io/bbolt" -) - -// ListedImage represents an image structure returuned from ListImages. -// It extends containerd/images.Image with extra fields. -type ListedImage struct { - images.Image - ContentSize int64 -} - -// ListImages returns the images from the image store. -func (c *Client) ListImages(ctx context.Context, filters ...string) ([]ListedImage, error) { - dbPath := filepath.Join(c.root, "containerdmeta.db") - if _, err := os.Stat(dbPath); os.IsNotExist(err) { - // The metadata database does not exist so we should just return as if there - // were no results. - return nil, nil - } - - // Open the bolt database for metadata. - // Since we are only listing we can open it as read-only. - db, err := bolt.Open(dbPath, 0644, &bolt.Options{ReadOnly: true}) - if err != nil { - return nil, fmt.Errorf("opening boltdb failed: %v", err) - } - - // Create the content store locally. - contentStore, err := local.NewStore(filepath.Join(c.root, "content")) - if err != nil { - return nil, fmt.Errorf("creating content store failed: %v", err) - } - - // Create the database for metadata. - mdb := ctdmetadata.NewDB(db, contentStore, nil) - - // Create the image store. - imageStore := ctdmetadata.NewImageStore(mdb) - - // List the images in the image store. - i, err := imageStore.List(ctx, filters...) - if err != nil { - return nil, fmt.Errorf("listing images with filters (%s) failed: %v", strings.Join(filters, ", "), err) - } - - listedImages := []ListedImage{} - for _, image := range i { - size, err := image.Size(ctx, contentStore, platforms.Default()) - if err != nil { - return nil, fmt.Errorf("calculating size of image %s failed: %v", image.Name, err) - } - listedImages = append(listedImages, ListedImage{Image: image, ContentSize: size}) - } - return listedImages, nil -} diff --git a/vendor/github.com/genuinetools/img/client/prune.go b/vendor/github.com/genuinetools/img/client/prune.go deleted file mode 100644 index 7f968f32..00000000 --- a/vendor/github.com/genuinetools/img/client/prune.go +++ /dev/null @@ -1,61 +0,0 @@ -package client - -import ( - "context" - "fmt" - - controlapi "github.com/moby/buildkit/api/services/control" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/worker/base" - "golang.org/x/sync/errgroup" -) - -// Prune calls Prune on the worker. -func (c *Client) Prune(ctx context.Context) ([]*controlapi.UsageRecord, error) { - ch := make(chan client.UsageInfo) - - // Create the worker opts. 
- opt, err := c.createWorkerOpt(false) - if err != nil { - return nil, fmt.Errorf("creating worker opt failed: %v", err) - } - - // Create the new worker. - w, err := base.NewWorker(opt) - if err != nil { - return nil, fmt.Errorf("creating worker failed: %v", err) - } - - eg, ctx := errgroup.WithContext(ctx) - eg.Go(func() error { - // Call prune on the worker. - return w.Prune(ctx, ch) - }) - - eg2, ctx := errgroup.WithContext(ctx) - eg2.Go(func() error { - defer close(ch) - return eg.Wait() - }) - - usage := []*controlapi.UsageRecord{} - eg2.Go(func() error { - for r := range ch { - usage = append(usage, &controlapi.UsageRecord{ - ID: r.ID, - Mutable: r.Mutable, - InUse: r.InUse, - Size_: r.Size, - Parent: r.Parent, - UsageCount: int64(r.UsageCount), - Description: r.Description, - CreatedAt: r.CreatedAt, - LastUsedAt: r.LastUsedAt, - }) - } - - return nil - }) - - return usage, eg2.Wait() -} diff --git a/vendor/github.com/genuinetools/img/client/pull.go b/vendor/github.com/genuinetools/img/client/pull.go deleted file mode 100644 index 49d1ffe1..00000000 --- a/vendor/github.com/genuinetools/img/client/pull.go +++ /dev/null @@ -1,117 +0,0 @@ -package client - -import ( - "context" - "fmt" - - "github.com/containerd/containerd/platforms" - "github.com/docker/distribution/reference" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/exporter" - imageexporter "github.com/moby/buildkit/exporter/containerimage" - "github.com/moby/buildkit/source" - "github.com/moby/buildkit/source/containerimage" -) - -// Pull retrieves an image from a remote registry. -func (c *Client) Pull(ctx context.Context, image string) (*ListedImage, error) { - sm, err := c.getSessionManager() - if err != nil { - return nil, err - } - // Parse the image name and tag. - named, err := reference.ParseNormalizedNamed(image) - if err != nil { - return nil, fmt.Errorf("parsing image name %q failed: %v", image, err) - } - // Add the latest lag if they did not provide one. - named = reference.TagNameOnly(named) - image = named.String() - - // Get the identifier for the image. - identifier, err := source.NewImageIdentifier(image) - if err != nil { - return nil, err - } - - // Create the worker opts. - opt, err := c.createWorkerOpt(false) - if err != nil { - return nil, fmt.Errorf("creating worker opt failed: %v", err) - } - - cm, err := cache.NewManager(cache.ManagerOpt{ - Snapshotter: opt.Snapshotter, - MetadataStore: opt.MetadataStore, - ContentStore: opt.ContentStore, - LeaseManager: opt.LeaseManager, - GarbageCollect: opt.GarbageCollect, - Applier: opt.Applier, - }) - if err != nil { - return nil, err - } - - // Create the source for the pull. - srcOpt := containerimage.SourceOpt{ - Snapshotter: opt.Snapshotter, - ContentStore: opt.ContentStore, - Applier: opt.Applier, - CacheAccessor: cm, - ImageStore: opt.ImageStore, - RegistryHosts: opt.RegistryHosts, - LeaseManager: opt.LeaseManager, - } - src, err := containerimage.NewSource(srcOpt) - if err != nil { - return nil, err - } - s, err := src.Resolve(ctx, identifier, sm) - if err != nil { - return nil, err - } - ref, err := s.Snapshot(ctx) - if err != nil { - return nil, err - } - - // Create the exporter for the pull. 
- iw, err := imageexporter.NewImageWriter(imageexporter.WriterOpt{ - Snapshotter: opt.Snapshotter, - ContentStore: opt.ContentStore, - Differ: opt.Differ, - }) - if err != nil { - return nil, err - } - expOpt := imageexporter.Opt{ - SessionManager: sm, - ImageWriter: iw, - Images: opt.ImageStore, - RegistryHosts: opt.RegistryHosts, - LeaseManager: opt.LeaseManager, - } - exp, err := imageexporter.New(expOpt) - if err != nil { - return nil, err - } - e, err := exp.Resolve(ctx, map[string]string{"name": image}) - if err != nil { - return nil, err - } - if _, err := e.Export(ctx, exporter.Source{Ref: ref}); err != nil { - return nil, err - } - - // Get the image. - img, err := opt.ImageStore.Get(ctx, image) - if err != nil { - return nil, fmt.Errorf("getting image %s from image store failed: %v", image, err) - } - size, err := img.Size(ctx, opt.ContentStore, platforms.Default()) - if err != nil { - return nil, fmt.Errorf("calculating size of image %s failed: %v", img.Name, err) - } - - return &ListedImage{Image: img, ContentSize: size}, nil -} diff --git a/vendor/github.com/genuinetools/img/client/push.go b/vendor/github.com/genuinetools/img/client/push.go deleted file mode 100644 index fd0cc1a4..00000000 --- a/vendor/github.com/genuinetools/img/client/push.go +++ /dev/null @@ -1,38 +0,0 @@ -package client - -import ( - "context" - "fmt" - - "github.com/docker/distribution/reference" - "github.com/moby/buildkit/util/push" -) - -// Push sends an image to a remote registry. -func (c *Client) Push(ctx context.Context, image string, insecure bool) error { - // Parse the image name and tag. - named, err := reference.ParseNormalizedNamed(image) - if err != nil { - return fmt.Errorf("parsing image name %q failed: %v", image, err) - } - // Add the latest lag if they did not provide one. - named = reference.TagNameOnly(named) - image = named.String() - - // Create the worker opts. - opt, err := c.createWorkerOpt(false) - if err != nil { - return fmt.Errorf("creating worker opt failed: %v", err) - } - - imgObj, err := opt.ImageStore.Get(ctx, image) - if err != nil { - return fmt.Errorf("getting image %q failed: %v", image, err) - } - - sm, err := c.getSessionManager() - if err != nil { - return err - } - return push.Push(ctx, sm, opt.ContentStore, imgObj.Target.Digest, image, insecure, opt.RegistryHosts, false) -} diff --git a/vendor/github.com/genuinetools/img/client/remove.go b/vendor/github.com/genuinetools/img/client/remove.go deleted file mode 100644 index b94d7931..00000000 --- a/vendor/github.com/genuinetools/img/client/remove.go +++ /dev/null @@ -1,34 +0,0 @@ -package client - -import ( - "context" - "fmt" - - "github.com/containerd/containerd/images" - "github.com/docker/distribution/reference" -) - -// RemoveImage removes image from the image store. -func (c *Client) RemoveImage(ctx context.Context, image string) error { - named, err := reference.ParseNormalizedNamed(image) - if err != nil { - return fmt.Errorf("parsing image name %q failed: %v", image, err) - } - // Add the latest lag if they did not provide one. - named = reference.TagNameOnly(named) - image = named.String() - - // Create the worker opts. - opt, err := c.createWorkerOpt(false) - if err != nil { - return fmt.Errorf("creating worker opt failed: %v", err) - } - - // Remove the image from the image store. 
- err = opt.ImageStore.Delete(ctx, image, images.SynchronousDelete()) - if err != nil { - return fmt.Errorf("removing image failed: %v", err) - } - - return nil -} diff --git a/vendor/github.com/genuinetools/img/client/save.go b/vendor/github.com/genuinetools/img/client/save.go deleted file mode 100644 index 24c9eca6..00000000 --- a/vendor/github.com/genuinetools/img/client/save.go +++ /dev/null @@ -1,53 +0,0 @@ -package client - -import ( - "context" - "errors" - "fmt" - "io" - - "github.com/containerd/containerd/images/archive" - "github.com/docker/distribution/reference" -) - -// SaveImage exports an image as a tarball which can then be imported by docker. -func (c *Client) SaveImage(ctx context.Context, image, format string, writer io.WriteCloser) error { - // Parse the image name and tag. - named, err := reference.ParseNormalizedNamed(image) - if err != nil { - return fmt.Errorf("parsing image name %q failed: %v", image, err) - } - // Add the latest lag if they did not provide one. - named = reference.TagNameOnly(named) - image = named.String() - - // Create the worker opts. - opt, err := c.createWorkerOpt(false) - if err != nil { - return fmt.Errorf("creating worker opt failed: %v", err) - } - - if opt.ImageStore == nil { - return errors.New("image store is nil") - } - - exportOpts := []archive.ExportOpt{ - archive.WithImage(opt.ImageStore, image), - } - - switch format { - case "docker": - - case "oci": - exportOpts = append(exportOpts, archive.WithSkipDockerManifest()) - - default: - return fmt.Errorf("%q is not a valid format", format) - } - - if err := archive.Export(ctx, opt.ContentStore, writer, exportOpts...); err != nil { - return fmt.Errorf("exporting image %s failed: %v", image, err) - } - - return writer.Close() -} diff --git a/vendor/github.com/genuinetools/img/client/session.go b/vendor/github.com/genuinetools/img/client/session.go deleted file mode 100644 index 01b3a5d5..00000000 --- a/vendor/github.com/genuinetools/img/client/session.go +++ /dev/null @@ -1,49 +0,0 @@ -package client - -import ( - "context" - "os" - - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/auth/authprovider" - "github.com/moby/buildkit/session/filesync" - "github.com/moby/buildkit/session/testutil" - "github.com/pkg/errors" -) - -func (c *Client) getSessionManager() (*session.Manager, error) { - if c.sessionManager == nil { - var err error - c.sessionManager, err = session.NewManager() - if err != nil { - return nil, err - } - } - return c.sessionManager, nil -} - -// Session creates the session manager and returns the session and it's -// dialer. 
-func (c *Client) Session(ctx context.Context) (*session.Session, session.Dialer, error) { - m, err := c.getSessionManager() - if err != nil { - return nil, nil, errors.Wrap(err, "failed to create session manager") - } - sessionName := "img" - s, err := session.NewSession(ctx, sessionName, "") - if err != nil { - return nil, nil, errors.Wrap(err, "failed to create session") - } - syncedDirs := make([]filesync.SyncedDir, 0, len(c.localDirs)) - for name, d := range c.localDirs { - syncedDirs = append(syncedDirs, filesync.SyncedDir{Name: name, Dir: d}) - } - s.Allow(filesync.NewFSSyncProvider(syncedDirs)) - s.Allow(authprovider.NewDockerAuthProvider(os.Stderr)) - return s, sessionDialer(s, m), err -} - -func sessionDialer(s *session.Session, m *session.Manager) session.Dialer { - // FIXME: rename testutil - return session.Dialer(testutil.TestStream(testutil.Handler(m.HandleConn))) -} diff --git a/vendor/github.com/genuinetools/img/client/solve.go b/vendor/github.com/genuinetools/img/client/solve.go deleted file mode 100644 index 4ec4a0b2..00000000 --- a/vendor/github.com/genuinetools/img/client/solve.go +++ /dev/null @@ -1,68 +0,0 @@ -package client - -import ( - "context" - "time" - - controlapi "github.com/moby/buildkit/api/services/control" - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" -) - -// Solve calls Solve on the controller. -func (c *Client) Solve(ctx context.Context, req *controlapi.SolveRequest, ch chan *controlapi.StatusResponse) error { - defer close(ch) - if c.controller == nil { - // Create the controller. - if err := c.createController(); err != nil { - return err - } - } - - statusCtx, cancelStatus := context.WithCancel(context.Background()) - eg, ctx := errgroup.WithContext(ctx) - eg.Go(func() error { - defer func() { // make sure the Status ends cleanly on build errors - go func() { - <-time.After(3 * time.Second) - cancelStatus() - }() - }() - _, err := c.controller.Solve(ctx, req) - if err != nil { - return errors.Wrap(err, "failed to solve") - } - return nil - }) - - eg.Go(func() error { - srv := &controlStatusServer{ - ctx: statusCtx, - ch: ch, - } - return c.controller.Status(&controlapi.StatusRequest{ - Ref: req.Ref, - }, srv) - }) - return eg.Wait() -} - -type controlStatusServer struct { - ctx context.Context - ch chan *controlapi.StatusResponse - grpc.ServerStream // dummy -} - -func (x *controlStatusServer) SendMsg(m interface{}) error { - return x.Send(m.(*controlapi.StatusResponse)) -} - -func (x *controlStatusServer) Send(m *controlapi.StatusResponse) error { - x.ch <- m - return nil -} - -func (x *controlStatusServer) Context() context.Context { - return x.ctx -} diff --git a/vendor/github.com/genuinetools/img/client/tag.go b/vendor/github.com/genuinetools/img/client/tag.go deleted file mode 100644 index cdfe1164..00000000 --- a/vendor/github.com/genuinetools/img/client/tag.go +++ /dev/null @@ -1,68 +0,0 @@ -package client - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/docker/distribution/reference" -) - -// TagImage creates a reference to an image with a specific name in the image store. -func (c *Client) TagImage(ctx context.Context, src, dest string) error { - // Parse the image name and tag for the src image. - named, err := reference.ParseNormalizedNamed(src) - if err != nil { - return fmt.Errorf("parsing image name %q failed: %v", src, err) - } - // Add the latest lag if they did not provide one. 
- named = reference.TagNameOnly(named) - src = named.String() - - // Parse the image name and tag for the dest image. - named, err = reference.ParseNormalizedNamed(dest) - if err != nil { - return fmt.Errorf("parsing image name %q failed: %v", dest, err) - } - // Add the latest lag if they did not provide one. - named = reference.TagNameOnly(named) - dest = named.String() - - // Create the worker opts. - opt, err := c.createWorkerOpt(false) - if err != nil { - return fmt.Errorf("creating worker opt failed: %v", err) - } - - if opt.ImageStore == nil { - return errors.New("image store is nil") - } - - // Get the source image. - image, err := opt.ImageStore.Get(ctx, src) - if err != nil { - return fmt.Errorf("getting image %s from image store failed: %v", src, err) - } - - // Update the target image. Create it if it does not exist. - img := images.Image{ - Name: dest, - Target: image.Target, - CreatedAt: time.Now(), - } - if _, err := opt.ImageStore.Update(ctx, img); err != nil { - if !errdefs.IsNotFound(err) { - return fmt.Errorf("updating image store for %s failed: %v", dest, err) - } - - // Create it if we didn't find it. - if _, err := opt.ImageStore.Create(ctx, img); err != nil { - return fmt.Errorf("creating image in image store for %s failed: %v", dest, err) - } - } - - return nil -} diff --git a/vendor/github.com/genuinetools/img/client/unpack.go b/vendor/github.com/genuinetools/img/client/unpack.go deleted file mode 100644 index 8eb5baf9..00000000 --- a/vendor/github.com/genuinetools/img/client/unpack.go +++ /dev/null @@ -1,75 +0,0 @@ -package client - -import ( - "context" - "errors" - "fmt" - "os" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" - "github.com/docker/distribution/reference" - "github.com/docker/docker/pkg/archive" - "github.com/sirupsen/logrus" -) - -// Unpack exports an image to a rootfs destination directory. -func (c *Client) Unpack(ctx context.Context, image, dest string) error { - if len(dest) < 1 { - return errors.New("destination directory for rootfs cannot be empty") - } - - if _, err := os.Stat(dest); err == nil { - return fmt.Errorf("destination directory already exists: %s", dest) - } - - // Parse the image name and tag. - named, err := reference.ParseNormalizedNamed(image) - if err != nil { - return fmt.Errorf("parsing image name %q failed: %v", image, err) - } - // Add the latest lag if they did not provide one. - named = reference.TagNameOnly(named) - image = named.String() - - // Create the worker opts. - opt, err := c.createWorkerOpt(true) - if err != nil { - return fmt.Errorf("creating worker opt failed: %v", err) - } - - if opt.ImageStore == nil { - return errors.New("image store is nil") - } - - img, err := opt.ImageStore.Get(ctx, image) - if err != nil { - return fmt.Errorf("getting image %s from image store failed: %v", image, err) - } - - manifest, err := images.Manifest(ctx, opt.ContentStore, img.Target, platforms.Default()) - if err != nil { - return fmt.Errorf("getting image manifest failed: %v", err) - } - - for _, desc := range manifest.Layers { - logrus.Debugf("Unpacking layer %s", desc.Digest.String()) - - // Read the blob from the content store. - layer, err := opt.ContentStore.ReaderAt(ctx, desc) - if err != nil { - return fmt.Errorf("getting reader for digest %s failed: %v", desc.Digest.String(), err) - } - - // Unpack the tarfile to the rootfs path. 
- // FROM: https://godoc.org/github.com/moby/moby/pkg/archive#TarOptions - if err := archive.Untar(content.NewReader(layer), dest, &archive.TarOptions{ - NoLchown: true, - }); err != nil { - return fmt.Errorf("extracting tar for %s to directory %s failed: %v", desc.Digest.String(), dest, err) - } - } - - return nil -} diff --git a/vendor/github.com/genuinetools/img/client/workeropt.go b/vendor/github.com/genuinetools/img/client/workeropt.go deleted file mode 100644 index 58640c0c..00000000 --- a/vendor/github.com/genuinetools/img/client/workeropt.go +++ /dev/null @@ -1,156 +0,0 @@ -package client - -import ( - "context" - "fmt" - "github.com/containerd/containerd/remotes/docker" - "github.com/moby/buildkit/util/leaseutil" - "os/exec" - "path/filepath" - "syscall" - - "github.com/containerd/containerd/content/local" - "github.com/containerd/containerd/diff/apply" - "github.com/containerd/containerd/diff/walking" - ctdmetadata "github.com/containerd/containerd/metadata" - "github.com/containerd/containerd/platforms" - ctdsnapshot "github.com/containerd/containerd/snapshots" - "github.com/containerd/containerd/snapshots/native" - "github.com/containerd/containerd/snapshots/overlay" - "github.com/genuinetools/img/types" - "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/executor" - executoroci "github.com/moby/buildkit/executor/oci" - "github.com/moby/buildkit/executor/runcexecutor" - containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" - "github.com/moby/buildkit/util/binfmt_misc" - "github.com/moby/buildkit/util/network/netproviders" - "github.com/moby/buildkit/worker/base" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/opencontainers/runc/libcontainer/system" - "github.com/sirupsen/logrus" - bolt "go.etcd.io/bbolt" -) - -// createWorkerOpt creates a base.WorkerOpt to be used for a new worker. -func (c *Client) createWorkerOpt(withExecutor bool) (opt base.WorkerOpt, err error) { - // Create the metadata store. - md, err := metadata.NewStore(filepath.Join(c.root, "metadata.db")) - if err != nil { - return opt, err - } - - snapshotRoot := filepath.Join(c.root, "snapshots") - unprivileged := system.GetParentNSeuid() != 0 - - // Create the snapshotter. - var ( - s ctdsnapshot.Snapshotter - ) - switch c.backend { - case types.NativeBackend: - s, err = native.NewSnapshotter(snapshotRoot) - case types.OverlayFSBackend: - // On some distros such as Ubuntu overlayfs can be mounted without privileges - s, err = overlay.NewSnapshotter(snapshotRoot) - default: - // "auto" backend needs to be already resolved on Client instantiation - return opt, fmt.Errorf("%s is not a valid snapshots backend", c.backend) - } - if err != nil { - return opt, fmt.Errorf("creating %s snapshotter failed: %v", c.backend, err) - } - - var exe executor.Executor - if withExecutor { - exeOpt := runcexecutor.Opt{ - Root: filepath.Join(c.root, "executor"), - Rootless: unprivileged, - ProcessMode: processMode(), - } - - np, err := netproviders.Providers(netproviders.Opt{Mode: "auto"}) - if err != nil { - return base.WorkerOpt{}, err - } - - exe, err = runcexecutor.New(exeOpt, np) - if err != nil { - return opt, err - } - } - - // Create the content store locally. - contentStore, err := local.NewStore(filepath.Join(c.root, "content")) - if err != nil { - return opt, err - } - - // Open the bolt database for metadata. 
- db, err := bolt.Open(filepath.Join(c.root, "containerdmeta.db"), 0644, nil) - if err != nil { - return opt, err - } - - // Create the new database for metadata. - mdb := ctdmetadata.NewDB(db, contentStore, map[string]ctdsnapshot.Snapshotter{ - c.backend: s, - }) - if err := mdb.Init(context.TODO()); err != nil { - return opt, err - } - - // Create the image store. - imageStore := ctdmetadata.NewImageStore(mdb) - - contentStore = containerdsnapshot.NewContentStore(mdb.ContentStore(), "buildkit") - - id, err := base.ID(c.root) - if err != nil { - return opt, err - } - - xlabels := base.Labels("oci", c.backend) - - var supportedPlatforms []specs.Platform - for _, p := range binfmt_misc.SupportedPlatforms(false) { - parsed, err := platforms.Parse(p) - if err != nil { - return opt, err - } - supportedPlatforms = append(supportedPlatforms, platforms.Normalize(parsed)) - } - - opt = base.WorkerOpt{ - ID: id, - Labels: xlabels, - MetadataStore: md, - Executor: exe, - Snapshotter: containerdsnapshot.NewSnapshotter(c.backend, mdb.Snapshotter(c.backend), "buildkit", nil), - ContentStore: contentStore, - Applier: apply.NewFileSystemApplier(contentStore), - Differ: walking.NewWalkingDiff(contentStore), - ImageStore: imageStore, - Platforms: supportedPlatforms, - RegistryHosts: docker.ConfigureDefaultRegistries(), - LeaseManager: leaseutil.WithNamespace(ctdmetadata.NewLeaseManager(mdb), "buildkit"), - GarbageCollect: mdb.GarbageCollect, - } - - return opt, err -} - -func processMode() executoroci.ProcessMode { - mountArgs := []string{"-t", "proc", "none", "/proc"} - cmd := exec.Command("mount", mountArgs...) - cmd.SysProcAttr = &syscall.SysProcAttr{ - Pdeathsig: syscall.SIGKILL, - Cloneflags: syscall.CLONE_NEWPID, - Unshareflags: syscall.CLONE_NEWNS, - } - if b, err := cmd.CombinedOutput(); err != nil { - logrus.Warnf("Process sandbox is not available, consider unmasking procfs: %v", string(b)) - return executoroci.NoProcessSandbox - } - return executoroci.ProcessSandbox -} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go b/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go deleted file mode 100644 index 3cddb13d..00000000 --- a/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go +++ /dev/null @@ -1,106 +0,0 @@ -package registry - -import ( - "context" - "encoding/json" - - "github.com/moby/buildkit/cache/remotecache" - v1 "github.com/moby/buildkit/cache/remotecache/v1" - "github.com/moby/buildkit/solver" - digest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc { - return func(ctx context.Context, _ map[string]string) (remotecache.Exporter, error) { - return NewExporter(), nil - } -} - -func NewExporter() remotecache.Exporter { - cc := v1.NewCacheChains() - return &exporter{CacheExporterTarget: cc, chains: cc} -} - -type exporter struct { - solver.CacheExporterTarget - chains *v1.CacheChains -} - -func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) { - return nil, nil -} - -func (ce *exporter) reset() { - cc := v1.NewCacheChains() - ce.CacheExporterTarget = cc - ce.chains = cc -} - -func (ce *exporter) ExportForLayers(layers []digest.Digest) ([]byte, error) { - config, descs, err := ce.chains.Marshal() - if err != nil { - return nil, err - } - - descs2 := map[digest.Digest]v1.DescriptorProviderPair{} - for _, k := range layers { - if v, ok := descs[k]; ok { - descs2[k] = v - continue - } - // fallback for uncompressed 
digests - for _, v := range descs { - if uc := v.Descriptor.Annotations["containerd.io/uncompressed"]; uc == string(k) { - descs2[v.Descriptor.Digest] = v - } - } - } - - cc := v1.NewCacheChains() - if err := v1.ParseConfig(*config, descs2, cc); err != nil { - return nil, err - } - - cfg, _, err := cc.Marshal() - if err != nil { - return nil, err - } - - if len(cfg.Layers) == 0 { - logrus.Warn("failed to match any cache with layers") - return nil, nil - } - - cache := map[int]int{} - - // reorder layers based on the order in the image - for i, r := range cfg.Records { - for j, rr := range r.Results { - n := getSortedLayerIndex(rr.LayerIndex, cfg.Layers, cache) - rr.LayerIndex = n - r.Results[j] = rr - cfg.Records[i] = r - } - } - - dt, err := json.Marshal(cfg.Records) - if err != nil { - return nil, err - } - ce.reset() - - return dt, nil -} - -func getSortedLayerIndex(idx int, layers []v1.CacheLayer, cache map[int]int) int { - if idx == -1 { - return -1 - } - l := layers[idx] - if i, ok := cache[idx]; ok { - return i - } - cache[idx] = getSortedLayerIndex(l.ParentIndex, layers, cache) + 1 - return cache[idx] -} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go deleted file mode 100644 index f66d5b4a..00000000 --- a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go +++ /dev/null @@ -1,83 +0,0 @@ -package local - -import ( - "context" - "time" - - "github.com/containerd/containerd/content" - "github.com/moby/buildkit/cache/remotecache" - "github.com/moby/buildkit/session" - sessioncontent "github.com/moby/buildkit/session/content" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -const ( - attrDigest = "digest" - attrSrc = "src" - attrDest = "dest" - contentStoreIDPrefix = "local:" -) - -// ResolveCacheExporterFunc for "local" cache exporter. -func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExporterFunc { - return func(ctx context.Context, attrs map[string]string) (remotecache.Exporter, error) { - store := attrs[attrDest] - if store == "" { - return nil, errors.New("local cache exporter requires dest") - } - csID := contentStoreIDPrefix + store - cs, err := getContentStore(ctx, sm, csID) - if err != nil { - return nil, err - } - return remotecache.NewExporter(cs), nil - } -} - -// ResolveCacheImporterFunc for "local" cache importer. 
-func ResolveCacheImporterFunc(sm *session.Manager) remotecache.ResolveCacheImporterFunc { - return func(ctx context.Context, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) { - dgstStr := attrs[attrDigest] - if dgstStr == "" { - return nil, specs.Descriptor{}, errors.New("local cache importer requires explicit digest") - } - dgst := digest.Digest(dgstStr) - store := attrs[attrSrc] - if store == "" { - return nil, specs.Descriptor{}, errors.New("local cache importer requires src") - } - csID := contentStoreIDPrefix + store - cs, err := getContentStore(ctx, sm, csID) - if err != nil { - return nil, specs.Descriptor{}, err - } - info, err := cs.Info(ctx, dgst) - if err != nil { - return nil, specs.Descriptor{}, err - } - desc := specs.Descriptor{ - // MediaType is typically MediaTypeDockerSchema2ManifestList, - // but we leave it empty until we get correct support for local index.json - Digest: dgst, - Size: info.Size, - } - return remotecache.NewImporter(cs), desc, nil - } -} - -func getContentStore(ctx context.Context, sm *session.Manager, storeID string) (content.Store, error) { - sessionID := session.FromContext(ctx) - if sessionID == "" { - return nil, errors.New("local cache exporter/importer requires session") - } - timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - caller, err := sm.Get(timeoutCtx, sessionID) - if err != nil { - return nil, err - } - return sessioncontent.NewCallerStore(caller, storeID), nil -} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go deleted file mode 100644 index a172917a..00000000 --- a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go +++ /dev/null @@ -1,96 +0,0 @@ -package registry - -import ( - "context" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/remotes/docker" - "github.com/docker/distribution/reference" - "github.com/moby/buildkit/cache/remotecache" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/util/contentutil" - "github.com/moby/buildkit/util/resolver" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func canonicalizeRef(rawRef string) (string, error) { - if rawRef == "" { - return "", errors.New("missing ref") - } - parsed, err := reference.ParseNormalizedNamed(rawRef) - if err != nil { - return "", err - } - return reference.TagNameOnly(parsed).String(), nil -} - -const ( - attrRef = "ref" -) - -func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) remotecache.ResolveCacheExporterFunc { - return func(ctx context.Context, attrs map[string]string) (remotecache.Exporter, error) { - ref, err := canonicalizeRef(attrs[attrRef]) - if err != nil { - return nil, err - } - remote := resolver.New(ctx, hosts, sm) - pusher, err := remote.Pusher(ctx, ref) - if err != nil { - return nil, err - } - return remotecache.NewExporter(contentutil.FromPusher(pusher)), nil - } -} - -func ResolveCacheImporterFunc(sm *session.Manager, cs content.Store, hosts docker.RegistryHosts) remotecache.ResolveCacheImporterFunc { - return func(ctx context.Context, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) { - ref, err := canonicalizeRef(attrs[attrRef]) - if err != nil { - return nil, specs.Descriptor{}, err - } - remote := 
resolver.New(ctx, hosts, sm) - xref, desc, err := remote.Resolve(ctx, ref) - if err != nil { - return nil, specs.Descriptor{}, err - } - fetcher, err := remote.Fetcher(ctx, xref) - if err != nil { - return nil, specs.Descriptor{}, err - } - src := &withDistributionSourceLabel{ - Provider: contentutil.FromFetcher(fetcher), - ref: ref, - source: cs, - } - return remotecache.NewImporter(src), desc, nil - } -} - -type withDistributionSourceLabel struct { - content.Provider - ref string - source content.Manager -} - -var _ remotecache.DistributionSourceLabelSetter = &withDistributionSourceLabel{} - -func (dsl *withDistributionSourceLabel) SetDistributionSourceLabel(ctx context.Context, dgst digest.Digest) error { - hf, err := docker.AppendDistributionSourceLabel(dsl.source, dsl.ref) - if err != nil { - return err - } - _, err = hf(ctx, ocispec.Descriptor{Digest: dgst}) - return err -} - -func (dsl *withDistributionSourceLabel) SetDistributionSourceAnnotation(desc ocispec.Descriptor) ocispec.Descriptor { - if desc.Annotations == nil { - desc.Annotations = map[string]string{} - } - desc.Annotations["containerd.io/distribution.source.ref"] = dsl.ref - return desc -} diff --git a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go deleted file mode 100644 index 0dbd4737..00000000 --- a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go +++ /dev/null @@ -1,109 +0,0 @@ -package imagemetaresolver - -import ( - "context" - "net/http" - "sync" - - "github.com/containerd/containerd/platforms" - "github.com/containerd/containerd/remotes" - "github.com/containerd/containerd/remotes/docker" - "github.com/docker/docker/pkg/locker" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/util/contentutil" - "github.com/moby/buildkit/util/imageutil" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -var defaultImageMetaResolver llb.ImageMetaResolver -var defaultImageMetaResolverOnce sync.Once - -var WithDefault = imageOptionFunc(func(ii *llb.ImageInfo) { - llb.WithMetaResolver(Default()).SetImageOption(ii) -}) - -type imageMetaResolverOpts struct { - platform *specs.Platform -} - -type ImageMetaResolverOpt func(o *imageMetaResolverOpts) - -func WithDefaultPlatform(p *specs.Platform) ImageMetaResolverOpt { - return func(o *imageMetaResolverOpts) { - o.platform = p - } -} - -func New(with ...ImageMetaResolverOpt) llb.ImageMetaResolver { - var opts imageMetaResolverOpts - for _, f := range with { - f(&opts) - } - return &imageMetaResolver{ - resolver: docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, - }), - platform: opts.platform, - buffer: contentutil.NewBuffer(), - cache: map[string]resolveResult{}, - locker: locker.New(), - } -} - -func Default() llb.ImageMetaResolver { - defaultImageMetaResolverOnce.Do(func() { - defaultImageMetaResolver = New() - }) - return defaultImageMetaResolver -} - -type imageMetaResolver struct { - resolver remotes.Resolver - buffer contentutil.Buffer - platform *specs.Platform - locker *locker.Locker - cache map[string]resolveResult -} - -type resolveResult struct { - config []byte - dgst digest.Digest -} - -func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) { - imr.locker.Lock(ref) - defer imr.locker.Unlock(ref) - - platform := opt.Platform - if platform == nil { - platform = 
imr.platform - } - - k := imr.key(ref, platform) - - if res, ok := imr.cache[k]; ok { - return res.dgst, res.config, nil - } - - dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, nil, platform) - if err != nil { - return "", nil, err - } - - imr.cache[k] = resolveResult{dgst: dgst, config: config} - return dgst, config, nil -} - -func (imr *imageMetaResolver) key(ref string, platform *specs.Platform) string { - if platform != nil { - ref += platforms.Format(*platform) - } - return ref -} - -type imageOptionFunc func(*llb.ImageInfo) - -func (fn imageOptionFunc) SetImageOption(ii *llb.ImageInfo) { - fn(ii) -} diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go deleted file mode 100644 index 26e432e6..00000000 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go +++ /dev/null @@ -1,337 +0,0 @@ -package runcexecutor - -import ( - "context" - "encoding/json" - "io" - "os" - "os/exec" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/containerd/containerd/mount" - containerdoci "github.com/containerd/containerd/oci" - "github.com/containerd/continuity/fs" - runc "github.com/containerd/go-runc" - "github.com/docker/docker/pkg/idtools" - "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/executor/oci" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/network" - rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type Opt struct { - // root directory - Root string - CommandCandidates []string - // without root privileges (has nothing to do with Opt.Root directory) - Rootless bool - // DefaultCgroupParent is the cgroup-parent name for executor - DefaultCgroupParent string - // ProcessMode - ProcessMode oci.ProcessMode - IdentityMapping *idtools.IdentityMapping - // runc run --no-pivot (unrecommended) - NoPivot bool - DNS *oci.DNSConfig - OOMScoreAdj *int -} - -var defaultCommandCandidates = []string{"buildkit-runc", "runc"} - -type runcExecutor struct { - runc *runc.Runc - root string - cmd string - cgroupParent string - rootless bool - networkProviders map[pb.NetMode]network.Provider - processMode oci.ProcessMode - idmap *idtools.IdentityMapping - noPivot bool - dns *oci.DNSConfig - oomScoreAdj *int -} - -func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Executor, error) { - cmds := opt.CommandCandidates - if cmds == nil { - cmds = defaultCommandCandidates - } - - var cmd string - var found bool - for _, cmd = range cmds { - if _, err := exec.LookPath(cmd); err == nil { - found = true - break - } - } - if !found { - return nil, errors.Errorf("failed to find %s binary", cmd) - } - - root := opt.Root - - if err := os.MkdirAll(root, 0711); err != nil { - return nil, errors.Wrapf(err, "failed to create %s", root) - } - - root, err := filepath.Abs(root) - if err != nil { - return nil, err - } - root, err = filepath.EvalSymlinks(root) - if err != nil { - return nil, err - } - - // clean up old hosts/resolv.conf file. 
ignore errors - os.RemoveAll(filepath.Join(root, "hosts")) - os.RemoveAll(filepath.Join(root, "resolv.conf")) - - runtime := &runc.Runc{ - Command: cmd, - Log: filepath.Join(root, "runc-log.json"), - LogFormat: runc.JSON, - PdeathSignal: syscall.SIGKILL, // this can still leak the process - Setpgid: true, - // we don't execute runc with --rootless=(true|false) explicitly, - // so as to support non-runc runtimes - } - - w := &runcExecutor{ - runc: runtime, - root: root, - cgroupParent: opt.DefaultCgroupParent, - rootless: opt.Rootless, - networkProviders: networkProviders, - processMode: opt.ProcessMode, - idmap: opt.IdentityMapping, - noPivot: opt.NoPivot, - dns: opt.DNS, - oomScoreAdj: opt.OOMScoreAdj, - } - return w, nil -} - -func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.Mountable, mounts []executor.Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error { - provider, ok := w.networkProviders[meta.NetMode] - if !ok { - return errors.Errorf("unknown network mode %s", meta.NetMode) - } - namespace, err := provider.New() - if err != nil { - return err - } - defer namespace.Close() - - if meta.NetMode == pb.NetMode_HOST { - logrus.Info("enabling HostNetworking") - } - - resolvConf, err := oci.GetResolvConf(ctx, w.root, w.idmap, w.dns) - if err != nil { - return err - } - - hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, w.idmap) - if err != nil { - return err - } - if clean != nil { - defer clean() - } - - mountable, err := root.Mount(ctx, false) - if err != nil { - return err - } - - rootMount, release, err := mountable.Mount() - if err != nil { - return err - } - if release != nil { - defer release() - } - - id := identity.NewID() - bundle := filepath.Join(w.root, id) - - if err := os.Mkdir(bundle, 0711); err != nil { - return err - } - defer os.RemoveAll(bundle) - - identity := idtools.Identity{} - if w.idmap != nil { - identity = w.idmap.RootPair() - } - - rootFSPath := filepath.Join(bundle, "rootfs") - if err := idtools.MkdirAllAndChown(rootFSPath, 0700, identity); err != nil { - return err - } - if err := mount.All(rootMount, rootFSPath); err != nil { - return err - } - defer mount.Unmount(rootFSPath, 0) - - uid, gid, sgids, err := oci.GetUser(ctx, rootFSPath, meta.User) - if err != nil { - return err - } - - f, err := os.Create(filepath.Join(bundle, "config.json")) - if err != nil { - return err - } - defer f.Close() - - opts := []containerdoci.SpecOpts{oci.WithUIDGID(uid, gid, sgids)} - - if meta.ReadonlyRootFS { - opts = append(opts, containerdoci.WithRootFSReadonly()) - } - - identity = idtools.Identity{ - UID: int(uid), - GID: int(gid), - } - if w.idmap != nil { - identity, err = w.idmap.ToHost(identity) - if err != nil { - return err - } - } - - if w.cgroupParent != "" { - var cgroupsPath string - lastSeparator := w.cgroupParent[len(w.cgroupParent)-1:] - if strings.Contains(w.cgroupParent, ".slice") && lastSeparator == ":" { - cgroupsPath = w.cgroupParent + id - } else { - cgroupsPath = filepath.Join("/", w.cgroupParent, "buildkit", id) - } - opts = append(opts, containerdoci.WithCgroup(cgroupsPath)) - } - spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.processMode, w.idmap, opts...) 
- if err != nil { - return err - } - defer cleanup() - - spec.Root.Path = rootFSPath - if _, ok := root.(cache.ImmutableRef); ok { // TODO: pass in with mount, not ref type - spec.Root.Readonly = true - } - - newp, err := fs.RootPath(rootFSPath, meta.Cwd) - if err != nil { - return errors.Wrapf(err, "working dir %s points to invalid target", newp) - } - if _, err := os.Stat(newp); err != nil { - if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil { - return errors.Wrapf(err, "failed to create working directory %s", newp) - } - } - - spec.Process.OOMScoreAdj = w.oomScoreAdj - if w.rootless { - if err := rootlessspecconv.ToRootless(spec); err != nil { - return err - } - } - - if err := json.NewEncoder(f).Encode(spec); err != nil { - return err - } - - // runCtx/killCtx is used for extra check in case the kill command blocks - runCtx, cancelRun := context.WithCancel(context.Background()) - defer cancelRun() - - done := make(chan struct{}) - go func() { - for { - select { - case <-ctx.Done(): - killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second) - if err := w.runc.Kill(killCtx, id, int(syscall.SIGKILL), nil); err != nil { - logrus.Errorf("failed to kill runc %s: %+v", id, err) - select { - case <-killCtx.Done(): - timeout() - cancelRun() - return - default: - } - } - timeout() - select { - case <-time.After(50 * time.Millisecond): - case <-done: - return - } - case <-done: - return - } - } - }() - - logrus.Debugf("> creating %s %v", id, meta.Args) - status, err := w.runc.Run(runCtx, id, bundle, &runc.CreateOpts{ - IO: &forwardIO{stdin: stdin, stdout: stdout, stderr: stderr}, - NoPivot: w.noPivot, - }) - close(done) - - if status != 0 || err != nil { - if err == nil { - err = errors.Errorf("exit code: %d", status) - } - select { - case <-ctx.Done(): - return errors.Wrapf(ctx.Err(), err.Error()) - default: - return err - } - } - - return nil -} - -type forwardIO struct { - stdin io.ReadCloser - stdout, stderr io.WriteCloser -} - -func (s *forwardIO) Close() error { - return nil -} - -func (s *forwardIO) Set(cmd *exec.Cmd) { - cmd.Stdin = s.stdin - cmd.Stdout = s.stdout - cmd.Stderr = s.stderr -} - -func (s *forwardIO) Stdin() io.WriteCloser { - return nil -} - -func (s *forwardIO) Stdout() io.ReadCloser { - return nil -} - -func (s *forwardIO) Stderr() io.ReadCloser { - return nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go deleted file mode 100644 index 9bc29378..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go +++ /dev/null @@ -1,641 +0,0 @@ -package builder - -import ( - "archive/tar" - "bytes" - "context" - "encoding/csv" - "encoding/json" - "fmt" - "net" - "path" - "regexp" - "strconv" - "strings" - - "github.com/containerd/containerd/platforms" - "github.com/docker/docker/builder/dockerignore" - controlapi "github.com/moby/buildkit/api/services/control" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/exporter/containerimage/exptypes" - "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" - "github.com/moby/buildkit/frontend/gateway/client" - gwpb "github.com/moby/buildkit/frontend/gateway/pb" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" -) - -const ( - DefaultLocalNameContext = "context" - DefaultLocalNameDockerfile = 
"dockerfile" - keyTarget = "target" - keyFilename = "filename" - keyCacheFrom = "cache-from" // for registry only. deprecated in favor of keyCacheImports - keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry - keyCacheNS = "build-arg:BUILDKIT_CACHE_MOUNT_NS" - defaultDockerfileName = "Dockerfile" - dockerignoreFilename = ".dockerignore" - buildArgPrefix = "build-arg:" - labelPrefix = "label:" - keyNoCache = "no-cache" - keyTargetPlatform = "platform" - keyMultiPlatform = "multi-platform" - keyImageResolveMode = "image-resolve-mode" - keyGlobalAddHosts = "add-hosts" - keyForceNetwork = "force-network-mode" - keyOverrideCopyImage = "override-copy-image" // remove after CopyOp implemented - keyNameContext = "contextkey" - keyNameDockerfile = "dockerfilekey" - keyContextSubDir = "contextsubdir" - keyContextKeepGitDir = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR" -) - -var httpPrefix = regexp.MustCompile(`^https?://`) -var gitUrlPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`) - -func Build(ctx context.Context, c client.Client) (*client.Result, error) { - opts := c.BuildOpts().Opts - caps := c.BuildOpts().LLBCaps - gwcaps := c.BuildOpts().Caps - - marshalOpts := []llb.ConstraintsOpt{llb.WithCaps(caps)} - - localNameContext := DefaultLocalNameContext - if v, ok := opts[keyNameContext]; ok { - localNameContext = v - } - - forceLocalDockerfile := false - localNameDockerfile := DefaultLocalNameDockerfile - if v, ok := opts[keyNameDockerfile]; ok { - forceLocalDockerfile = true - localNameDockerfile = v - } - - defaultBuildPlatform := platforms.DefaultSpec() - if workers := c.BuildOpts().Workers; len(workers) > 0 && len(workers[0].Platforms) > 0 { - defaultBuildPlatform = workers[0].Platforms[0] - } - - buildPlatforms := []specs.Platform{defaultBuildPlatform} - targetPlatforms := []*specs.Platform{nil} - if v := opts[keyTargetPlatform]; v != "" { - var err error - targetPlatforms, err = parsePlatforms(v) - if err != nil { - return nil, err - } - } - - resolveMode, err := parseResolveMode(opts[keyImageResolveMode]) - if err != nil { - return nil, err - } - - extraHosts, err := parseExtraHosts(opts[keyGlobalAddHosts]) - if err != nil { - return nil, errors.Wrap(err, "failed to parse additional hosts") - } - - defaultNetMode, err := parseNetMode(opts[keyForceNetwork]) - if err != nil { - return nil, err - } - - filename := opts[keyFilename] - if filename == "" { - filename = defaultDockerfileName - } - - var ignoreCache []string - if v, ok := opts[keyNoCache]; ok { - if v == "" { - ignoreCache = []string{} // means all stages - } else { - ignoreCache = strings.Split(v, ",") - } - } - - name := "load build definition from " + filename - - src := llb.Local(localNameDockerfile, - llb.FollowPaths([]string{filename, filename + ".dockerignore"}), - llb.SessionID(c.BuildOpts().SessionID), - llb.SharedKeyHint(localNameDockerfile), - dockerfile2llb.WithInternalName(name), - ) - - fileop := useFileOp(opts, &caps) - - var buildContext *llb.State - isNotLocalContext := false - if st, ok := detectGitContext(opts[localNameContext], opts[keyContextKeepGitDir]); ok { - if !forceLocalDockerfile { - src = *st - } - buildContext = st - } else if httpPrefix.MatchString(opts[localNameContext]) { - httpContext := llb.HTTP(opts[localNameContext], llb.Filename("context"), dockerfile2llb.WithInternalName("load remote build context")) - def, err := httpContext.Marshal(marshalOpts...) 
- if err != nil { - return nil, errors.Wrapf(err, "failed to marshal httpcontext") - } - res, err := c.Solve(ctx, client.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return nil, errors.Wrapf(err, "failed to resolve httpcontext") - } - - ref, err := res.SingleRef() - if err != nil { - return nil, err - } - - dt, err := ref.ReadFile(ctx, client.ReadRequest{ - Filename: "context", - Range: &client.FileRange{ - Length: 1024, - }, - }) - if err != nil { - return nil, errors.Errorf("failed to read downloaded context") - } - if isArchive(dt) { - if fileop { - bc := llb.Scratch().File(llb.Copy(httpContext, "/context", "/", &llb.CopyInfo{ - AttemptUnpack: true, - })) - if !forceLocalDockerfile { - src = bc - } - buildContext = &bc - } else { - copyImage := opts[keyOverrideCopyImage] - if copyImage == "" { - copyImage = dockerfile2llb.DefaultCopyImage - } - unpack := llb.Image(copyImage, dockerfile2llb.WithInternalName("helper image for file operations")). - Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("extracting build context")) - unpack.AddMount("/src", httpContext, llb.Readonly) - bc := unpack.AddMount("/out", llb.Scratch()) - if !forceLocalDockerfile { - src = bc - } - buildContext = &bc - } - } else { - filename = "context" - if !forceLocalDockerfile { - src = httpContext - } - buildContext = &httpContext - isNotLocalContext = true - } - } else if (&gwcaps).Supports(gwpb.CapFrontendInputs) == nil { - inputs, err := c.Inputs(ctx) - if err != nil { - return nil, errors.Wrapf(err, "failed to get frontend inputs") - } - - if !forceLocalDockerfile { - inputDockerfile, ok := inputs[DefaultLocalNameDockerfile] - if ok { - src = inputDockerfile - } - } - - inputCtx, ok := inputs[DefaultLocalNameContext] - if ok { - buildContext = &inputCtx - isNotLocalContext = true - } - } - - if buildContext != nil { - if sub, ok := opts[keyContextSubDir]; ok { - buildContext = scopeToSubDir(buildContext, fileop, sub) - } - } - - def, err := src.Marshal(marshalOpts...) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal local source") - } - - eg, ctx2 := errgroup.WithContext(ctx) - var dtDockerfile []byte - var dtDockerignore []byte - var dtDockerignoreDefault []byte - eg.Go(func() error { - res, err := c.Solve(ctx2, client.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return errors.Wrapf(err, "failed to resolve dockerfile") - } - - ref, err := res.SingleRef() - if err != nil { - return err - } - - dtDockerfile, err = ref.ReadFile(ctx2, client.ReadRequest{ - Filename: filename, - }) - if err != nil { - return errors.Wrapf(err, "failed to read dockerfile") - } - - dt, err := ref.ReadFile(ctx2, client.ReadRequest{ - Filename: filename + ".dockerignore", - }) - if err == nil { - dtDockerignore = dt - } - return nil - }) - var excludes []string - if !isNotLocalContext { - eg.Go(func() error { - dockerignoreState := buildContext - if dockerignoreState == nil { - st := llb.Local(localNameContext, - llb.SessionID(c.BuildOpts().SessionID), - llb.FollowPaths([]string{dockerignoreFilename}), - llb.SharedKeyHint(localNameContext+"-"+dockerignoreFilename), - dockerfile2llb.WithInternalName("load "+dockerignoreFilename), - ) - dockerignoreState = &st - } - def, err := dockerignoreState.Marshal(marshalOpts...) 
- if err != nil { - return err - } - res, err := c.Solve(ctx2, client.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return err - } - ref, err := res.SingleRef() - if err != nil { - return err - } - dtDockerignoreDefault, err = ref.ReadFile(ctx2, client.ReadRequest{ - Filename: dockerignoreFilename, - }) - if err != nil { - return nil - } - return nil - }) - } - - if err := eg.Wait(); err != nil { - return nil, err - } - - if dtDockerignore == nil { - dtDockerignore = dtDockerignoreDefault - } - if dtDockerignore != nil { - excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dtDockerignore)) - if err != nil { - return nil, errors.Wrap(err, "failed to parse dockerignore") - } - } - - if _, ok := opts["cmdline"]; !ok { - ref, cmdline, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile)) - if ok { - return forwardGateway(ctx, c, ref, cmdline) - } - } - - exportMap := len(targetPlatforms) > 1 - - if v := opts[keyMultiPlatform]; v != "" { - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Errorf("invalid boolean value %s", v) - } - if !b && exportMap { - return nil, errors.Errorf("returning multiple target plaforms is not allowed") - } - exportMap = b - } - - expPlatforms := &exptypes.Platforms{ - Platforms: make([]exptypes.Platform, len(targetPlatforms)), - } - res := client.NewResult() - - eg, ctx = errgroup.WithContext(ctx) - - for i, tp := range targetPlatforms { - func(i int, tp *specs.Platform) { - eg.Go(func() error { - st, img, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{ - Target: opts[keyTarget], - MetaResolver: c, - BuildArgs: filter(opts, buildArgPrefix), - Labels: filter(opts, labelPrefix), - CacheIDNamespace: opts[keyCacheNS], - SessionID: c.BuildOpts().SessionID, - BuildContext: buildContext, - Excludes: excludes, - IgnoreCache: ignoreCache, - TargetPlatform: tp, - BuildPlatforms: buildPlatforms, - ImageResolveMode: resolveMode, - PrefixPlatform: exportMap, - ExtraHosts: extraHosts, - ForceNetMode: defaultNetMode, - OverrideCopyImage: opts[keyOverrideCopyImage], - LLBCaps: &caps, - }) - - if err != nil { - return errors.Wrapf(err, "failed to create LLB definition") - } - - def, err := st.Marshal() - if err != nil { - return errors.Wrapf(err, "failed to marshal LLB definition") - } - - config, err := json.Marshal(img) - if err != nil { - return errors.Wrapf(err, "failed to marshal image config") - } - - var cacheImports []client.CacheOptionsEntry - // new API - if cacheImportsStr := opts[keyCacheImports]; cacheImportsStr != "" { - var cacheImportsUM []controlapi.CacheOptionsEntry - if err := json.Unmarshal([]byte(cacheImportsStr), &cacheImportsUM); err != nil { - return errors.Wrapf(err, "failed to unmarshal %s (%q)", keyCacheImports, cacheImportsStr) - } - for _, um := range cacheImportsUM { - cacheImports = append(cacheImports, client.CacheOptionsEntry{Type: um.Type, Attrs: um.Attrs}) - } - } - // old API - if cacheFromStr := opts[keyCacheFrom]; cacheFromStr != "" { - cacheFrom := strings.Split(cacheFromStr, ",") - for _, s := range cacheFrom { - im := client.CacheOptionsEntry{ - Type: "registry", - Attrs: map[string]string{ - "ref": s, - }, - } - // FIXME(AkihiroSuda): skip append if already exists - cacheImports = append(cacheImports, im) - } - } - - r, err := c.Solve(ctx, client.SolveRequest{ - Definition: def.ToPB(), - CacheImports: cacheImports, - }) - if err != nil { - return err - } - - ref, err := r.SingleRef() - if err != nil { - return err - } - - if !exportMap { - 
res.AddMeta(exptypes.ExporterImageConfigKey, config) - res.SetRef(ref) - } else { - p := platforms.DefaultSpec() - if tp != nil { - p = *tp - } - - k := platforms.Format(p) - res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config) - res.AddRef(k, ref) - expPlatforms.Platforms[i] = exptypes.Platform{ - ID: k, - Platform: p, - } - } - return nil - }) - }(i, tp) - } - - if err := eg.Wait(); err != nil { - return nil, err - } - - if exportMap { - dt, err := json.Marshal(expPlatforms) - if err != nil { - return nil, err - } - res.AddMeta(exptypes.ExporterPlatformsKey, dt) - } - - return res, nil -} - -func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline string) (*client.Result, error) { - opts := c.BuildOpts().Opts - if opts == nil { - opts = map[string]string{} - } - opts["cmdline"] = cmdline - opts["source"] = ref - - gwcaps := c.BuildOpts().Caps - var frontendInputs map[string]*pb.Definition - if (&gwcaps).Supports(gwpb.CapFrontendInputs) == nil { - inputs, err := c.Inputs(ctx) - if err != nil { - return nil, errors.Wrapf(err, "failed to get frontend inputs") - } - - frontendInputs = make(map[string]*pb.Definition) - for name, state := range inputs { - def, err := state.Marshal() - if err != nil { - return nil, err - } - frontendInputs[name] = def.ToPB() - } - } - - return c.Solve(ctx, client.SolveRequest{ - Frontend: "gateway.v0", - FrontendOpt: opts, - FrontendInputs: frontendInputs, - }) -} - -func filter(opt map[string]string, key string) map[string]string { - m := map[string]string{} - for k, v := range opt { - if strings.HasPrefix(k, key) { - m[strings.TrimPrefix(k, key)] = v - } - } - return m -} - -func detectGitContext(ref, gitContext string) (*llb.State, bool) { - found := false - if httpPrefix.MatchString(ref) && gitUrlPathWithFragmentSuffix.MatchString(ref) { - found = true - } - - keepGit := false - if gitContext != "" { - if v, err := strconv.ParseBool(gitContext); err == nil { - keepGit = v - } - } - - for _, prefix := range []string{"git://", "github.com/", "git@"} { - if strings.HasPrefix(ref, prefix) { - found = true - break - } - } - if !found { - return nil, false - } - - parts := strings.SplitN(ref, "#", 2) - branch := "" - if len(parts) > 1 { - branch = parts[1] - } - gitOpts := []llb.GitOption{dockerfile2llb.WithInternalName("load git source " + ref)} - if keepGit { - gitOpts = append(gitOpts, llb.KeepGitDir()) - } - - st := llb.Git(parts[0], branch, gitOpts...) 
- return &st, true -} - -func isArchive(header []byte) bool { - for _, m := range [][]byte{ - {0x42, 0x5A, 0x68}, // bzip2 - {0x1F, 0x8B, 0x08}, // gzip - {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz - } { - if len(header) < len(m) { - continue - } - if bytes.Equal(m, header[:len(m)]) { - return true - } - } - - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} - -func parsePlatforms(v string) ([]*specs.Platform, error) { - var pp []*specs.Platform - for _, v := range strings.Split(v, ",") { - p, err := platforms.Parse(v) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse target platform %s", v) - } - p = platforms.Normalize(p) - pp = append(pp, &p) - } - return pp, nil -} - -func parseResolveMode(v string) (llb.ResolveMode, error) { - switch v { - case pb.AttrImageResolveModeDefault, "": - return llb.ResolveModeDefault, nil - case pb.AttrImageResolveModeForcePull: - return llb.ResolveModeForcePull, nil - case pb.AttrImageResolveModePreferLocal: - return llb.ResolveModePreferLocal, nil - default: - return 0, errors.Errorf("invalid image-resolve-mode: %s", v) - } -} - -func parseExtraHosts(v string) ([]llb.HostIP, error) { - if v == "" { - return nil, nil - } - out := make([]llb.HostIP, 0) - csvReader := csv.NewReader(strings.NewReader(v)) - fields, err := csvReader.Read() - if err != nil { - return nil, err - } - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { - return nil, errors.Errorf("invalid key-value pair %s", field) - } - key := strings.ToLower(parts[0]) - val := strings.ToLower(parts[1]) - ip := net.ParseIP(val) - if ip == nil { - return nil, errors.Errorf("failed to parse IP %s", val) - } - out = append(out, llb.HostIP{Host: key, IP: ip}) - } - return out, nil -} - -func parseNetMode(v string) (pb.NetMode, error) { - if v == "" { - return llb.NetModeSandbox, nil - } - switch v { - case "none": - return llb.NetModeNone, nil - case "host": - return llb.NetModeHost, nil - case "sandbox": - return llb.NetModeSandbox, nil - default: - return 0, errors.Errorf("invalid netmode %s", v) - } -} - -func useFileOp(args map[string]string, caps *apicaps.CapSet) bool { - enabled := true - if v, ok := args["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok { - if b, err := strconv.ParseBool(v); err == nil { - enabled = !b - } - } - return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil -} - -func scopeToSubDir(c *llb.State, fileop bool, dir string) *llb.State { - if fileop { - bc := llb.Scratch().File(llb.Copy(*c, dir, "/", &llb.CopyInfo{ - CopyDirContentsOnly: true, - })) - return &bc - } - unpack := llb.Image(dockerfile2llb.DefaultCopyImage, dockerfile2llb.WithInternalName("helper image for file operations")). - Run(llb.Shlexf("copy %s/. /out/", path.Join("/src", dir)), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("filtering build context")) - unpack.AddMount("/src", *c, llb.Readonly) - bc := unpack.AddMount("/out", llb.Scratch()) - return &bc -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go deleted file mode 100644 index f23c6874..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go +++ /dev/null @@ -1,46 +0,0 @@ -// Package command contains the set of Dockerfile commands. 
-package command - -// Define constants for the command strings -const ( - Add = "add" - Arg = "arg" - Cmd = "cmd" - Copy = "copy" - Entrypoint = "entrypoint" - Env = "env" - Expose = "expose" - From = "from" - Healthcheck = "healthcheck" - Label = "label" - Maintainer = "maintainer" - Onbuild = "onbuild" - Run = "run" - Shell = "shell" - StopSignal = "stopsignal" - User = "user" - Volume = "volume" - Workdir = "workdir" -) - -// Commands is list of all Dockerfile commands -var Commands = map[string]struct{}{ - Add: {}, - Arg: {}, - Cmd: {}, - Copy: {}, - Entrypoint: {}, - Env: {}, - Expose: {}, - From: {}, - Healthcheck: {}, - Label: {}, - Maintainer: {}, - Onbuild: {}, - Run: {}, - Shell: {}, - StopSignal: {}, - User: {}, - Volume: {}, - Workdir: {}, -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go deleted file mode 100644 index b911938a..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go +++ /dev/null @@ -1,1323 +0,0 @@ -package dockerfile2llb - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "math" - "net/url" - "path" - "path/filepath" - "sort" - "strconv" - "strings" - - "github.com/containerd/containerd/platforms" - "github.com/docker/distribution/reference" - "github.com/docker/docker/pkg/signal" - "github.com/docker/go-connections/nat" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/client/llb/imagemetaresolver" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/moby/buildkit/frontend/dockerfile/parser" - "github.com/moby/buildkit/frontend/dockerfile/shell" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - "github.com/moby/buildkit/util/system" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" -) - -const ( - emptyImageName = "scratch" - defaultContextLocalName = "context" - historyComment = "buildkit.dockerfile.v0" - - DefaultCopyImage = "docker/dockerfile-copy:v0.1.9@sha256:e8f159d3f00786604b93c675ee2783f8dc194bb565e61ca5788f6a6e9d304061" -) - -type ConvertOpt struct { - Target string - MetaResolver llb.ImageMetaResolver - BuildArgs map[string]string - Labels map[string]string - SessionID string - BuildContext *llb.State - Excludes []string - // IgnoreCache contains names of the stages that should not use build cache. - // Empty slice means ignore cache for all stages. Nil doesn't disable cache. 
- IgnoreCache []string - // CacheIDNamespace scopes the IDs for different cache mounts - CacheIDNamespace string - ImageResolveMode llb.ResolveMode - TargetPlatform *specs.Platform - BuildPlatforms []specs.Platform - PrefixPlatform bool - ExtraHosts []llb.HostIP - ForceNetMode pb.NetMode - OverrideCopyImage string - LLBCaps *apicaps.CapSet - ContextLocalName string -} - -func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, error) { - if len(dt) == 0 { - return nil, nil, errors.Errorf("the Dockerfile cannot be empty") - } - - if opt.ContextLocalName == "" { - opt.ContextLocalName = defaultContextLocalName - } - - platformOpt := buildPlatformOpt(&opt) - - optMetaArgs := getPlatformArgs(platformOpt) - for i, arg := range optMetaArgs { - optMetaArgs[i] = setKVValue(arg, opt.BuildArgs) - } - - dockerfile, err := parser.Parse(bytes.NewReader(dt)) - if err != nil { - return nil, nil, err - } - - proxyEnv := proxyEnvFromBuildArgs(opt.BuildArgs) - - stages, metaArgs, err := instructions.Parse(dockerfile.AST) - if err != nil { - return nil, nil, err - } - - shlex := shell.NewLex(dockerfile.EscapeToken) - - for _, metaArg := range metaArgs { - if metaArg.Value != nil { - *metaArg.Value, _ = shlex.ProcessWordWithMap(*metaArg.Value, metaArgsToMap(optMetaArgs)) - } - optMetaArgs = append(optMetaArgs, setKVValue(metaArg.KeyValuePairOptional, opt.BuildArgs)) - } - - metaResolver := opt.MetaResolver - if metaResolver == nil { - metaResolver = imagemetaresolver.Default() - } - - allDispatchStates := newDispatchStates() - - // set base state for every image - for i, st := range stages { - name, err := shlex.ProcessWordWithMap(st.BaseName, metaArgsToMap(optMetaArgs)) - if err != nil { - return nil, nil, err - } - if name == "" { - return nil, nil, errors.Errorf("base name (%s) should not be blank", st.BaseName) - } - st.BaseName = name - - ds := &dispatchState{ - stage: st, - deps: make(map[*dispatchState]struct{}), - ctxPaths: make(map[string]struct{}), - stageName: st.Name, - prefixPlatform: opt.PrefixPlatform, - } - - if st.Name == "" { - ds.stageName = fmt.Sprintf("stage-%d", i) - } - - if v := st.Platform; v != "" { - v, err := shlex.ProcessWordWithMap(v, metaArgsToMap(optMetaArgs)) - if err != nil { - return nil, nil, errors.Wrapf(err, "failed to process arguments for platform %s", v) - } - - p, err := platforms.Parse(v) - if err != nil { - return nil, nil, errors.Wrapf(err, "failed to parse platform %s", v) - } - ds.platform = &p - } - allDispatchStates.addState(ds) - - total := 0 - if ds.stage.BaseName != emptyImageName && ds.base == nil { - total = 1 - } - for _, cmd := range ds.stage.Commands { - switch cmd.(type) { - case *instructions.AddCommand, *instructions.CopyCommand, *instructions.RunCommand: - total++ - case *instructions.WorkdirCommand: - if useFileOp(opt.BuildArgs, opt.LLBCaps) { - total++ - } - } - } - ds.cmdTotal = total - - if opt.IgnoreCache != nil { - if len(opt.IgnoreCache) == 0 { - ds.ignoreCache = true - } else if st.Name != "" { - for _, n := range opt.IgnoreCache { - if strings.EqualFold(n, st.Name) { - ds.ignoreCache = true - } - } - } - } - } - - var target *dispatchState - if opt.Target == "" { - target = allDispatchStates.lastTarget() - } else { - var ok bool - target, ok = allDispatchStates.findStateByName(opt.Target) - if !ok { - return nil, nil, errors.Errorf("target stage %s could not be found", opt.Target) - } - } - - // fill dependencies to stages so unreachable ones can avoid loading image configs - for _, d := range 
allDispatchStates.states { - d.commands = make([]command, len(d.stage.Commands)) - for i, cmd := range d.stage.Commands { - newCmd, err := toCommand(cmd, allDispatchStates) - if err != nil { - return nil, nil, err - } - d.commands[i] = newCmd - for _, src := range newCmd.sources { - if src != nil { - d.deps[src] = struct{}{} - if src.unregistered { - allDispatchStates.addState(src) - } - } - } - } - } - - if has, state := hasCircularDependency(allDispatchStates.states); has { - return nil, nil, fmt.Errorf("circular dependency detected on stage: %s", state.stageName) - } - - if len(allDispatchStates.states) == 1 { - allDispatchStates.states[0].stageName = "" - } - - eg, ctx := errgroup.WithContext(ctx) - for i, d := range allDispatchStates.states { - reachable := isReachable(target, d) - // resolve image config for every stage - if d.base == nil { - if d.stage.BaseName == emptyImageName { - d.state = llb.Scratch() - d.image = emptyImage(platformOpt.targetPlatform) - continue - } - func(i int, d *dispatchState) { - eg.Go(func() error { - ref, err := reference.ParseNormalizedNamed(d.stage.BaseName) - if err != nil { - return errors.Wrapf(err, "failed to parse stage name %q", d.stage.BaseName) - } - platform := d.platform - if platform == nil { - platform = &platformOpt.targetPlatform - } - d.stage.BaseName = reference.TagNameOnly(ref).String() - var isScratch bool - if metaResolver != nil && reachable && !d.unregistered { - prefix := "[" - if opt.PrefixPlatform && platform != nil { - prefix += platforms.Format(*platform) + " " - } - prefix += "internal]" - dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, llb.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: opt.ImageResolveMode.String(), - LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName), - }) - if err == nil { // handle the error while builder is actually running - var img Image - if err := json.Unmarshal(dt, &img); err != nil { - return err - } - img.Created = nil - // if there is no explicit target platform, try to match based on image config - if d.platform == nil && platformOpt.implicitTarget { - p := autoDetectPlatform(img, *platform, platformOpt.buildPlatforms) - platform = &p - } - d.image = img - if dgst != "" { - ref, err = reference.WithDigest(ref, dgst) - if err != nil { - return err - } - } - d.stage.BaseName = ref.String() - if len(img.RootFS.DiffIDs) == 0 { - isScratch = true - // schema1 images can't return diffIDs so double check :( - for _, h := range img.History { - if !h.EmptyLayer { - isScratch = false - break - } - } - } - } - } - if isScratch { - d.state = llb.Scratch() - } else { - d.state = llb.Image(d.stage.BaseName, dfCmd(d.stage.SourceCode), llb.Platform(*platform), opt.ImageResolveMode, llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.PrefixPlatform, platform))) - } - d.platform = platform - return nil - }) - }(i, d) - } - } - - if err := eg.Wait(); err != nil { - return nil, nil, err - } - - buildContext := &mutableOutput{} - ctxPaths := map[string]struct{}{} - - for _, d := range allDispatchStates.states { - if !isReachable(target, d) { - continue - } - if d.base != nil { - d.state = d.base.state - d.platform = d.base.platform - d.image = clone(d.base.image) - } - - // make sure that PATH is always set - if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok { - d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv) - } - - // initialize base metadata from image conf - for _, env := range d.image.Config.Env { 
- k, v := parseKeyValue(env) - d.state = d.state.AddEnv(k, v) - } - if d.image.Config.WorkingDir != "" { - if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false, nil); err != nil { - return nil, nil, err - } - } - if d.image.Config.User != "" { - if err = dispatchUser(d, &instructions.UserCommand{User: d.image.Config.User}, false); err != nil { - return nil, nil, err - } - } - d.state = d.state.Network(opt.ForceNetMode) - - opt := dispatchOpt{ - allDispatchStates: allDispatchStates, - metaArgs: optMetaArgs, - buildArgValues: opt.BuildArgs, - shlex: shlex, - sessionID: opt.SessionID, - buildContext: llb.NewState(buildContext), - proxyEnv: proxyEnv, - cacheIDNamespace: opt.CacheIDNamespace, - buildPlatforms: platformOpt.buildPlatforms, - targetPlatform: platformOpt.targetPlatform, - extraHosts: opt.ExtraHosts, - copyImage: opt.OverrideCopyImage, - llbCaps: opt.LLBCaps, - } - if opt.copyImage == "" { - opt.copyImage = DefaultCopyImage - } - - if err = dispatchOnBuildTriggers(d, d.image.Config.OnBuild, opt); err != nil { - return nil, nil, err - } - d.image.Config.OnBuild = nil - - for _, cmd := range d.commands { - if err := dispatch(d, cmd, opt); err != nil { - return nil, nil, err - } - } - - for p := range d.ctxPaths { - ctxPaths[p] = struct{}{} - } - } - - if len(opt.Labels) != 0 && target.image.Config.Labels == nil { - target.image.Config.Labels = make(map[string]string, len(opt.Labels)) - } - for k, v := range opt.Labels { - target.image.Config.Labels[k] = v - } - - opts := []llb.LocalOption{ - llb.SessionID(opt.SessionID), - llb.ExcludePatterns(opt.Excludes), - llb.SharedKeyHint(opt.ContextLocalName), - WithInternalName("load build context"), - } - if includePatterns := normalizeContextPaths(ctxPaths); includePatterns != nil { - opts = append(opts, llb.FollowPaths(includePatterns)) - } - - bc := llb.Local(opt.ContextLocalName, opts...) - if opt.BuildContext != nil { - bc = *opt.BuildContext - } - buildContext.Output = bc.Output() - - defaults := []llb.ConstraintsOpt{ - llb.Platform(platformOpt.targetPlatform), - } - if opt.LLBCaps != nil { - defaults = append(defaults, llb.WithCaps(*opt.LLBCaps)) - } - st := target.state.SetMarshalDefaults(defaults...) 
- - if !platformOpt.implicitTarget { - target.image.OS = platformOpt.targetPlatform.OS - target.image.Architecture = platformOpt.targetPlatform.Architecture - target.image.Variant = platformOpt.targetPlatform.Variant - } - - return &st, &target.image, nil -} - -func metaArgsToMap(metaArgs []instructions.KeyValuePairOptional) map[string]string { - m := map[string]string{} - - for _, arg := range metaArgs { - m[arg.Key] = arg.ValueString() - } - - return m -} - -func toCommand(ic instructions.Command, allDispatchStates *dispatchStates) (command, error) { - cmd := command{Command: ic} - if c, ok := ic.(*instructions.CopyCommand); ok { - if c.From != "" { - var stn *dispatchState - index, err := strconv.Atoi(c.From) - if err != nil { - stn, ok = allDispatchStates.findStateByName(c.From) - if !ok { - stn = &dispatchState{ - stage: instructions.Stage{BaseName: c.From}, - deps: make(map[*dispatchState]struct{}), - unregistered: true, - } - } - } else { - stn, err = allDispatchStates.findStateByIndex(index) - if err != nil { - return command{}, err - } - } - cmd.sources = []*dispatchState{stn} - } - } - - if ok := detectRunMount(&cmd, allDispatchStates); ok { - return cmd, nil - } - - return cmd, nil -} - -type dispatchOpt struct { - allDispatchStates *dispatchStates - metaArgs []instructions.KeyValuePairOptional - buildArgValues map[string]string - shlex *shell.Lex - sessionID string - buildContext llb.State - proxyEnv *llb.ProxyEnv - cacheIDNamespace string - targetPlatform specs.Platform - buildPlatforms []specs.Platform - extraHosts []llb.HostIP - copyImage string - llbCaps *apicaps.CapSet -} - -func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { - if ex, ok := cmd.Command.(instructions.SupportsSingleWordExpansion); ok { - err := ex.Expand(func(word string) (string, error) { - return opt.shlex.ProcessWord(word, d.state.Env()) - }) - if err != nil { - return err - } - } - - var err error - switch c := cmd.Command.(type) { - case *instructions.MaintainerCommand: - err = dispatchMaintainer(d, c) - case *instructions.EnvCommand: - err = dispatchEnv(d, c) - case *instructions.RunCommand: - err = dispatchRun(d, c, opt.proxyEnv, cmd.sources, opt) - case *instructions.WorkdirCommand: - err = dispatchWorkdir(d, c, true, &opt) - case *instructions.AddCommand: - err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, c.Chown, opt) - if err == nil { - for _, src := range c.Sources() { - if !strings.HasPrefix(src, "http://") && !strings.HasPrefix(src, "https://") { - d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{} - } - } - } - case *instructions.LabelCommand: - err = dispatchLabel(d, c) - case *instructions.OnbuildCommand: - err = dispatchOnbuild(d, c) - case *instructions.CmdCommand: - err = dispatchCmd(d, c) - case *instructions.EntrypointCommand: - err = dispatchEntrypoint(d, c) - case *instructions.HealthCheckCommand: - err = dispatchHealthcheck(d, c) - case *instructions.ExposeCommand: - err = dispatchExpose(d, c, opt.shlex) - case *instructions.UserCommand: - err = dispatchUser(d, c, true) - case *instructions.VolumeCommand: - err = dispatchVolume(d, c) - case *instructions.StopSignalCommand: - err = dispatchStopSignal(d, c) - case *instructions.ShellCommand: - err = dispatchShell(d, c) - case *instructions.ArgCommand: - err = dispatchArg(d, c, opt.metaArgs, opt.buildArgValues) - case *instructions.CopyCommand: - l := opt.buildContext - if len(cmd.sources) != 0 { - l = cmd.sources[0].state - } - err = dispatchCopy(d, c.SourcesAndDest, l, false, c, c.Chown, 
opt) - if err == nil && len(cmd.sources) == 0 { - for _, src := range c.Sources() { - d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{} - } - } - default: - } - return err -} - -type dispatchState struct { - state llb.State - image Image - platform *specs.Platform - stage instructions.Stage - base *dispatchState - deps map[*dispatchState]struct{} - buildArgs []instructions.KeyValuePairOptional - commands []command - ctxPaths map[string]struct{} - ignoreCache bool - cmdSet bool - unregistered bool - stageName string - cmdIndex int - cmdTotal int - prefixPlatform bool -} - -type dispatchStates struct { - states []*dispatchState - statesByName map[string]*dispatchState -} - -func newDispatchStates() *dispatchStates { - return &dispatchStates{statesByName: map[string]*dispatchState{}} -} - -func (dss *dispatchStates) addState(ds *dispatchState) { - dss.states = append(dss.states, ds) - - if d, ok := dss.statesByName[ds.stage.BaseName]; ok { - ds.base = d - } - if ds.stage.Name != "" { - dss.statesByName[strings.ToLower(ds.stage.Name)] = ds - } -} - -func (dss *dispatchStates) findStateByName(name string) (*dispatchState, bool) { - ds, ok := dss.statesByName[strings.ToLower(name)] - return ds, ok -} - -func (dss *dispatchStates) findStateByIndex(index int) (*dispatchState, error) { - if index < 0 || index >= len(dss.states) { - return nil, errors.Errorf("invalid stage index %d", index) - } - - return dss.states[index], nil -} - -func (dss *dispatchStates) lastTarget() *dispatchState { - return dss.states[len(dss.states)-1] -} - -type command struct { - instructions.Command - sources []*dispatchState -} - -func dispatchOnBuildTriggers(d *dispatchState, triggers []string, opt dispatchOpt) error { - for _, trigger := range triggers { - ast, err := parser.Parse(strings.NewReader(trigger)) - if err != nil { - return err - } - if len(ast.AST.Children) != 1 { - return errors.New("onbuild trigger should be a single expression") - } - ic, err := instructions.ParseCommand(ast.AST.Children[0]) - if err != nil { - return err - } - cmd, err := toCommand(ic, opt.allDispatchStates) - if err != nil { - return err - } - if err := dispatch(d, cmd, opt); err != nil { - return err - } - } - return nil -} - -func dispatchEnv(d *dispatchState, c *instructions.EnvCommand) error { - commitMessage := bytes.NewBufferString("ENV") - for _, e := range c.Env { - commitMessage.WriteString(" " + e.String()) - d.state = d.state.AddEnv(e.Key, e.Value) - d.image.Config.Env = addEnv(d.image.Config.Env, e.Key, e.Value) - } - return commitToHistory(&d.image, commitMessage.String(), false, nil) -} - -func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyEnv, sources []*dispatchState, dopt dispatchOpt) error { - var args []string = c.CmdLine - if c.PrependShell { - args = withShell(d.image, args) - } - env := d.state.Env() - opt := []llb.RunOption{llb.Args(args), dfCmd(c)} - if d.ignoreCache { - opt = append(opt, llb.IgnoreCache) - } - if proxy != nil { - opt = append(opt, llb.WithProxy(*proxy)) - } - - runMounts, err := dispatchRunMounts(d, c, sources, dopt) - if err != nil { - return err - } - opt = append(opt, runMounts...) 
- - securityOpt, err := dispatchRunSecurity(c) - if err != nil { - return err - } - if securityOpt != nil { - opt = append(opt, securityOpt) - } - - networkOpt, err := dispatchRunNetwork(c) - if err != nil { - return err - } - if networkOpt != nil { - opt = append(opt, networkOpt) - } - - shlex := *dopt.shlex - shlex.RawQuotes = true - shlex.SkipUnsetEnv = true - - opt = append(opt, llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(&shlex, c.String(), env)), d.prefixPlatform, d.state.GetPlatform()))) - for _, h := range dopt.extraHosts { - opt = append(opt, llb.AddExtraHost(h.Host, h.IP)) - } - d.state = d.state.Run(opt...).Root() - return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs, shell.BuildEnvs(env)), true, &d.state) -} - -func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool, opt *dispatchOpt) error { - d.state = d.state.Dir(c.Path) - wd := c.Path - if !path.IsAbs(c.Path) { - wd = path.Join("/", d.image.Config.WorkingDir, wd) - } - d.image.Config.WorkingDir = wd - if commit { - withLayer := false - if wd != "/" && opt != nil && useFileOp(opt.buildArgValues, opt.llbCaps) { - mkdirOpt := []llb.MkdirOption{llb.WithParents(true)} - if user := d.image.Config.User; user != "" { - mkdirOpt = append(mkdirOpt, llb.WithUser(user)) - } - platform := opt.targetPlatform - if d.platform != nil { - platform = *d.platform - } - d.state = d.state.File(llb.Mkdir(wd, 0755, mkdirOpt...), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, c.String(), d.state.Env())), d.prefixPlatform, &platform))) - withLayer = true - } - return commitToHistory(&d.image, "WORKDIR "+wd, withLayer, nil) - } - return nil -} - -func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error { - dest := path.Join("/", pathRelativeToWorkingDir(d.state, c.Dest())) - if c.Dest() == "." || c.Dest() == "" || c.Dest()[len(c.Dest())-1] == filepath.Separator { - dest += string(filepath.Separator) - } - - var copyOpt []llb.CopyOption - - if chown != "" { - copyOpt = append(copyOpt, llb.WithUser(chown)) - } - - commitMessage := bytes.NewBufferString("") - if isAddCommand { - commitMessage.WriteString("ADD") - } else { - commitMessage.WriteString("COPY") - } - - var a *llb.FileAction - - for _, src := range c.Sources() { - commitMessage.WriteString(" " + src) - if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { - if !isAddCommand { - return errors.New("source can't be a URL for COPY") - } - - // Resources from remote URLs are not decompressed. - // https://docs.docker.com/engine/reference/builder/#add - // - // Note: mixing up remote archives and local archives in a single ADD instruction - // would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717 - u, err := url.Parse(src) - f := "__unnamed__" - if err == nil { - if base := path.Base(u.Path); base != "." && base != "/" { - f = base - } - } - - st := llb.HTTP(src, llb.Filename(f), dfCmd(c)) - - opts := append([]llb.CopyOption{&llb.CopyInfo{ - CreateDestPath: true, - }}, copyOpt...) - - if a == nil { - a = llb.Copy(st, f, dest, opts...) - } else { - a = a.Copy(st, f, dest, opts...) - } - } else { - opts := append([]llb.CopyOption{&llb.CopyInfo{ - FollowSymlinks: true, - CopyDirContentsOnly: true, - AttemptUnpack: isAddCommand, - CreateDestPath: true, - AllowWildcard: true, - AllowEmptyWildcard: true, - }}, copyOpt...) 
- - if a == nil { - a = llb.Copy(sourceState, filepath.Join("/", src), dest, opts...) - } else { - a = a.Copy(sourceState, filepath.Join("/", src), dest, opts...) - } - } - } - - commitMessage.WriteString(" " + c.Dest()) - - platform := opt.targetPlatform - if d.platform != nil { - platform = *d.platform - } - - fileOpt := []llb.ConstraintsOpt{llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), d.state.Env())), d.prefixPlatform, &platform))} - if d.ignoreCache { - fileOpt = append(fileOpt, llb.IgnoreCache) - } - - d.state = d.state.File(a, fileOpt...) - return commitToHistory(&d.image, commitMessage.String(), true, &d.state) -} - -func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error { - if useFileOp(opt.buildArgValues, opt.llbCaps) { - return dispatchCopyFileOp(d, c, sourceState, isAddCommand, cmdToPrint, chown, opt) - } - - img := llb.Image(opt.copyImage, llb.MarkImageInternal, llb.Platform(opt.buildPlatforms[0]), WithInternalName("helper image for file operations")) - - dest := path.Join(".", pathRelativeToWorkingDir(d.state, c.Dest())) - if c.Dest() == "." || c.Dest() == "" || c.Dest()[len(c.Dest())-1] == filepath.Separator { - dest += string(filepath.Separator) - } - args := []string{"copy"} - unpack := isAddCommand - - mounts := make([]llb.RunOption, 0, len(c.Sources())) - if chown != "" { - args = append(args, fmt.Sprintf("--chown=%s", chown)) - _, _, err := parseUser(chown) - if err != nil { - mounts = append(mounts, llb.AddMount("/etc/passwd", d.state, llb.SourcePath("/etc/passwd"), llb.Readonly)) - mounts = append(mounts, llb.AddMount("/etc/group", d.state, llb.SourcePath("/etc/group"), llb.Readonly)) - } - } - - commitMessage := bytes.NewBufferString("") - if isAddCommand { - commitMessage.WriteString("ADD") - } else { - commitMessage.WriteString("COPY") - } - - for i, src := range c.Sources() { - commitMessage.WriteString(" " + src) - if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { - if !isAddCommand { - return errors.New("source can't be a URL for COPY") - } - - // Resources from remote URLs are not decompressed. - // https://docs.docker.com/engine/reference/builder/#add - // - // Note: mixing up remote archives and local archives in a single ADD instruction - // would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717 - unpack = false - u, err := url.Parse(src) - f := "__unnamed__" - if err == nil { - if base := path.Base(u.Path); base != "." && base != "/" { - f = base - } - } - target := path.Join(fmt.Sprintf("/src-%d", i), f) - args = append(args, target) - mounts = append(mounts, llb.AddMount(path.Dir(target), llb.HTTP(src, llb.Filename(f), dfCmd(c)), llb.Readonly)) - } else { - d, f := splitWildcards(src) - targetCmd := fmt.Sprintf("/src-%d", i) - targetMount := targetCmd - if f == "" { - f = path.Base(src) - targetMount = path.Join(targetMount, f) - } - targetCmd = path.Join(targetCmd, f) - args = append(args, targetCmd) - mounts = append(mounts, llb.AddMount(targetMount, sourceState, llb.SourcePath(d), llb.Readonly)) - } - } - - commitMessage.WriteString(" " + c.Dest()) - - args = append(args, dest) - if unpack { - args = append(args[:1], append([]string{"--unpack"}, args[1:]...)...) 
- } - - platform := opt.targetPlatform - if d.platform != nil { - platform = *d.platform - } - - runOpt := []llb.RunOption{llb.Args(args), llb.Dir("/dest"), llb.ReadonlyRootFS(), dfCmd(cmdToPrint), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), d.state.Env())), d.prefixPlatform, &platform))} - if d.ignoreCache { - runOpt = append(runOpt, llb.IgnoreCache) - } - - if opt.llbCaps != nil { - if err := opt.llbCaps.Supports(pb.CapExecMetaNetwork); err == nil { - runOpt = append(runOpt, llb.Network(llb.NetModeNone)) - } - } - - run := img.Run(append(runOpt, mounts...)...) - d.state = run.AddMount("/dest", d.state).Platform(platform) - - return commitToHistory(&d.image, commitMessage.String(), true, &d.state) -} - -func dispatchMaintainer(d *dispatchState, c *instructions.MaintainerCommand) error { - d.image.Author = c.Maintainer - return commitToHistory(&d.image, fmt.Sprintf("MAINTAINER %v", c.Maintainer), false, nil) -} - -func dispatchLabel(d *dispatchState, c *instructions.LabelCommand) error { - commitMessage := bytes.NewBufferString("LABEL") - if d.image.Config.Labels == nil { - d.image.Config.Labels = make(map[string]string, len(c.Labels)) - } - for _, v := range c.Labels { - d.image.Config.Labels[v.Key] = v.Value - commitMessage.WriteString(" " + v.String()) - } - return commitToHistory(&d.image, commitMessage.String(), false, nil) -} - -func dispatchOnbuild(d *dispatchState, c *instructions.OnbuildCommand) error { - d.image.Config.OnBuild = append(d.image.Config.OnBuild, c.Expression) - return nil -} - -func dispatchCmd(d *dispatchState, c *instructions.CmdCommand) error { - var args []string = c.CmdLine - if c.PrependShell { - args = withShell(d.image, args) - } - d.image.Config.Cmd = args - d.image.Config.ArgsEscaped = true - d.cmdSet = true - return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil) -} - -func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) error { - var args []string = c.CmdLine - if c.PrependShell { - args = withShell(d.image, args) - } - d.image.Config.Entrypoint = args - if !d.cmdSet { - d.image.Config.Cmd = nil - } - return commitToHistory(&d.image, fmt.Sprintf("ENTRYPOINT %q", args), false, nil) -} - -func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error { - d.image.Config.Healthcheck = &HealthConfig{ - Test: c.Health.Test, - Interval: c.Health.Interval, - Timeout: c.Health.Timeout, - StartPeriod: c.Health.StartPeriod, - Retries: c.Health.Retries, - } - return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil) -} - -func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shell.Lex) error { - ports := []string{} - for _, p := range c.Ports { - ps, err := shlex.ProcessWords(p, d.state.Env()) - if err != nil { - return err - } - ports = append(ports, ps...) 
- } - c.Ports = ports - - ps, _, err := nat.ParsePortSpecs(c.Ports) - if err != nil { - return err - } - - if d.image.Config.ExposedPorts == nil { - d.image.Config.ExposedPorts = make(map[string]struct{}) - } - for p := range ps { - d.image.Config.ExposedPorts[string(p)] = struct{}{} - } - - return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil) -} - -func dispatchUser(d *dispatchState, c *instructions.UserCommand, commit bool) error { - d.state = d.state.User(c.User) - d.image.Config.User = c.User - if commit { - return commitToHistory(&d.image, fmt.Sprintf("USER %v", c.User), false, nil) - } - return nil -} - -func dispatchVolume(d *dispatchState, c *instructions.VolumeCommand) error { - if d.image.Config.Volumes == nil { - d.image.Config.Volumes = map[string]struct{}{} - } - for _, v := range c.Volumes { - if v == "" { - return errors.New("VOLUME specified can not be an empty string") - } - d.image.Config.Volumes[v] = struct{}{} - } - return commitToHistory(&d.image, fmt.Sprintf("VOLUME %v", c.Volumes), false, nil) -} - -func dispatchStopSignal(d *dispatchState, c *instructions.StopSignalCommand) error { - if _, err := signal.ParseSignal(c.Signal); err != nil { - return err - } - d.image.Config.StopSignal = c.Signal - return commitToHistory(&d.image, fmt.Sprintf("STOPSIGNAL %v", c.Signal), false, nil) -} - -func dispatchShell(d *dispatchState, c *instructions.ShellCommand) error { - d.image.Config.Shell = c.Shell - return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil) -} - -func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.KeyValuePairOptional, buildArgValues map[string]string) error { - commitStr := "ARG " + c.Key - buildArg := setKVValue(c.KeyValuePairOptional, buildArgValues) - - if c.Value != nil { - commitStr += "=" + *c.Value - } - if buildArg.Value == nil { - for _, ma := range metaArgs { - if ma.Key == buildArg.Key { - buildArg.Value = ma.Value - } - } - } - - if buildArg.Value != nil { - d.state = d.state.AddEnv(buildArg.Key, *buildArg.Value) - } - - d.buildArgs = append(d.buildArgs, buildArg) - return commitToHistory(&d.image, commitStr, false, nil) -} - -func pathRelativeToWorkingDir(s llb.State, p string) string { - if path.IsAbs(p) { - return p - } - return path.Join(s.GetDir(), p) -} - -func splitWildcards(name string) (string, string) { - i := 0 - for ; i < len(name); i++ { - ch := name[i] - if ch == '\\' { - i++ - } else if ch == '*' || ch == '?' 
|| ch == '[' { - break - } - } - if i == len(name) { - return name, "" - } - - base := path.Base(name[:i]) - if name[:i] == "" || strings.HasSuffix(name[:i], string(filepath.Separator)) { - base = "" - } - return path.Dir(name[:i]), base + name[i:] -} - -func addEnv(env []string, k, v string) []string { - gotOne := false - for i, envVar := range env { - key, _ := parseKeyValue(envVar) - if shell.EqualEnvKeys(key, k) { - env[i] = k + "=" + v - gotOne = true - break - } - } - if !gotOne { - env = append(env, k+"="+v) - } - return env -} - -func parseKeyValue(env string) (string, string) { - parts := strings.SplitN(env, "=", 2) - v := "" - if len(parts) > 1 { - v = parts[1] - } - - return parts[0], v -} - -func setKVValue(kvpo instructions.KeyValuePairOptional, values map[string]string) instructions.KeyValuePairOptional { - if v, ok := values[kvpo.Key]; ok { - kvpo.Value = &v - } - return kvpo -} - -func dfCmd(cmd interface{}) llb.ConstraintsOpt { - // TODO: add fmt.Stringer to instructions.Command to remove interface{} - var cmdStr string - if cmd, ok := cmd.(fmt.Stringer); ok { - cmdStr = cmd.String() - } - if cmd, ok := cmd.(string); ok { - cmdStr = cmd - } - return llb.WithDescription(map[string]string{ - "com.docker.dockerfile.v1.command": cmdStr, - }) -} - -func runCommandString(args []string, buildArgs []instructions.KeyValuePairOptional, envMap map[string]string) string { - var tmpBuildEnv []string - for _, arg := range buildArgs { - v, ok := envMap[arg.Key] - if !ok { - v = arg.ValueString() - } - tmpBuildEnv = append(tmpBuildEnv, arg.Key+"="+v) - } - if len(tmpBuildEnv) > 0 { - tmpBuildEnv = append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...) - } - - return strings.Join(append(tmpBuildEnv, args...), " ") -} - -func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) error { - if st != nil { - msg += " # buildkit" - } - - img.History = append(img.History, specs.History{ - CreatedBy: msg, - Comment: historyComment, - EmptyLayer: !withLayer, - }) - return nil -} - -func isReachable(from, to *dispatchState) (ret bool) { - if from == nil { - return false - } - if from == to || isReachable(from.base, to) { - return true - } - for d := range from.deps { - if isReachable(d, to) { - return true - } - } - return false -} - -func hasCircularDependency(states []*dispatchState) (bool, *dispatchState) { - var visit func(state *dispatchState) bool - if states == nil { - return false, nil - } - visited := make(map[*dispatchState]struct{}) - path := make(map[*dispatchState]struct{}) - - visit = func(state *dispatchState) bool { - _, ok := visited[state] - if ok { - return false - } - visited[state] = struct{}{} - path[state] = struct{}{} - for dep := range state.deps { - _, ok = path[dep] - if ok { - return true - } - if visit(dep) { - return true - } - } - delete(path, state) - return false - } - for _, state := range states { - if visit(state) { - return true, state - } - } - return false, nil -} - -func parseUser(str string) (uid uint32, gid uint32, err error) { - if str == "" { - return 0, 0, nil - } - parts := strings.SplitN(str, ":", 2) - for i, v := range parts { - switch i { - case 0: - uid, err = parseUID(v) - if err != nil { - return 0, 0, err - } - if len(parts) == 1 { - gid = uid - } - case 1: - gid, err = parseUID(v) - if err != nil { - return 0, 0, err - } - } - } - return -} - -func parseUID(str string) (uint32, error) { - if str == "root" { - return 0, nil - } - uid, err := strconv.ParseUint(str, 10, 32) - if err != nil { - return 0, err - } - 
return uint32(uid), nil -} - -func normalizeContextPaths(paths map[string]struct{}) []string { - pathSlice := make([]string, 0, len(paths)) - for p := range paths { - if p == "/" { - return nil - } - pathSlice = append(pathSlice, path.Join(".", p)) - } - - sort.Slice(pathSlice, func(i, j int) bool { - return pathSlice[i] < pathSlice[j] - }) - return pathSlice -} - -func proxyEnvFromBuildArgs(args map[string]string) *llb.ProxyEnv { - pe := &llb.ProxyEnv{} - isNil := true - for k, v := range args { - if strings.EqualFold(k, "http_proxy") { - pe.HttpProxy = v - isNil = false - } - if strings.EqualFold(k, "https_proxy") { - pe.HttpsProxy = v - isNil = false - } - if strings.EqualFold(k, "ftp_proxy") { - pe.FtpProxy = v - isNil = false - } - if strings.EqualFold(k, "no_proxy") { - pe.NoProxy = v - isNil = false - } - } - if isNil { - return nil - } - return pe -} - -type mutableOutput struct { - llb.Output -} - -func withShell(img Image, args []string) []string { - var shell []string - if len(img.Config.Shell) > 0 { - shell = append([]string{}, img.Config.Shell...) - } else { - shell = defaultShell() - } - return append(shell, strings.Join(args, " ")) -} - -func autoDetectPlatform(img Image, target specs.Platform, supported []specs.Platform) specs.Platform { - os := img.OS - arch := img.Architecture - if target.OS == os && target.Architecture == arch { - return target - } - for _, p := range supported { - if p.OS == os && p.Architecture == arch { - return p - } - } - return target -} - -func WithInternalName(name string) llb.ConstraintsOpt { - return llb.WithCustomName("[internal] " + name) -} - -func uppercaseCmd(str string) string { - p := strings.SplitN(str, " ", 2) - p[0] = strings.ToUpper(p[0]) - return strings.Join(p, " ") -} - -func processCmdEnv(shlex *shell.Lex, cmd string, env []string) string { - w, err := shlex.ProcessWord(cmd, env) - if err != nil { - return cmd - } - return w -} - -func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform *specs.Platform) string { - if ds.cmdTotal == 0 { - return str - } - out := "[" - if prefixPlatform && platform != nil { - out += platforms.Format(*platform) + " " - } - if ds.stageName != "" { - out += ds.stageName + " " - } - ds.cmdIndex++ - out += fmt.Sprintf("%*d/%d] ", int(1+math.Log10(float64(ds.cmdTotal))), ds.cmdIndex, ds.cmdTotal) - return out + str -} - -func useFileOp(args map[string]string, caps *apicaps.CapSet) bool { - enabled := true - if v, ok := args["BUILDKIT_DISABLE_FILEOP"]; ok { - if b, err := strconv.ParseBool(v); err == nil { - enabled = !b - } - } - return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go deleted file mode 100644 index 5f0cd086..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !dfrunmount - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" -) - -func detectRunMount(cmd *command, allDispatchStates *dispatchStates) bool { - return false -} - -func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) ([]llb.RunOption, error) { - return nil, nil -} diff --git 
a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunnetwork.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunnetwork.go deleted file mode 100644 index 300b9d85..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunnetwork.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !dfrunnetwork - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" -) - -func dispatchRunNetwork(c *instructions.RunCommand) (llb.RunOption, error) { - return nil, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go deleted file mode 100644 index 10184b42..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !dfrunsecurity - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" -) - -func dispatchRunSecurity(c *instructions.RunCommand) (llb.RunOption, error) { - return nil, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go deleted file mode 100644 index d7547021..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build dfrunmount,!dfsecrets - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/pkg/errors" -) - -func dispatchSecret(m *instructions.Mount) (llb.RunOption, error) { - return nil, errors.Errorf("secret mounts not allowed") -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go deleted file mode 100644 index 8b8afdc3..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build dfrunmount,!dfssh - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/pkg/errors" -) - -func dispatchSSH(m *instructions.Mount) (llb.RunOption, error) { - return nil, errors.Errorf("ssh mounts not allowed") -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go deleted file mode 100644 index 33595abb..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go +++ /dev/null @@ -1,156 +0,0 @@ -// +build dfrunmount - -package dockerfile2llb - -import ( - "fmt" - "os" - "path" - "path/filepath" - "strconv" - "strings" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/moby/buildkit/solver/pb" - "github.com/pkg/errors" -) - -func detectRunMount(cmd *command, allDispatchStates *dispatchStates) bool { - if c, ok := cmd.Command.(*instructions.RunCommand); ok { - mounts := instructions.GetMounts(c) - sources := make([]*dispatchState, len(mounts)) - for i, 
mount := range mounts { - if mount.From == "" && mount.Type == instructions.MountTypeCache { - mount.From = emptyImageName - } - from := mount.From - if from == "" || mount.Type == instructions.MountTypeTmpfs { - continue - } - stn, ok := allDispatchStates.findStateByName(from) - if !ok { - stn = &dispatchState{ - stage: instructions.Stage{BaseName: from}, - deps: make(map[*dispatchState]struct{}), - unregistered: true, - } - } - sources[i] = stn - } - cmd.sources = sources - return true - } - - return false -} - -func setCacheUIDGIDFileOp(m *instructions.Mount, st llb.State) llb.State { - uid := 0 - gid := 0 - mode := os.FileMode(0755) - if m.UID != nil { - uid = int(*m.UID) - } - if m.GID != nil { - gid = int(*m.GID) - } - if m.Mode != nil { - mode = os.FileMode(*m.Mode) - } - return st.File(llb.Mkdir("/cache", mode, llb.WithUIDGID(uid, gid)), llb.WithCustomName("[internal] settings cache mount permissions")) -} - -func setCacheUIDGID(m *instructions.Mount, st llb.State, fileop bool) llb.State { - if fileop { - return setCacheUIDGIDFileOp(m, st) - } - - var b strings.Builder - if m.UID != nil { - b.WriteString(fmt.Sprintf("chown %d /mnt/cache;", *m.UID)) - } - if m.GID != nil { - b.WriteString(fmt.Sprintf("chown :%d /mnt/cache;", *m.GID)) - } - if m.Mode != nil { - b.WriteString(fmt.Sprintf("chmod %s /mnt/cache;", strconv.FormatUint(*m.Mode, 8))) - } - return llb.Image("busybox").Run(llb.Shlex(fmt.Sprintf("sh -c 'mkdir -p /mnt/cache;%s'", b.String())), llb.WithCustomName("[internal] settings cache mount permissions")).AddMount("/mnt", st) -} - -func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) ([]llb.RunOption, error) { - var out []llb.RunOption - mounts := instructions.GetMounts(c) - - for i, mount := range mounts { - if mount.From == "" && mount.Type == instructions.MountTypeCache { - mount.From = emptyImageName - } - st := opt.buildContext - if mount.From != "" { - st = sources[i].state - } - var mountOpts []llb.MountOption - if mount.Type == instructions.MountTypeTmpfs { - st = llb.Scratch() - mountOpts = append(mountOpts, llb.Tmpfs()) - } - if mount.Type == instructions.MountTypeSecret { - secret, err := dispatchSecret(mount) - if err != nil { - return nil, err - } - out = append(out, secret) - continue - } - if mount.Type == instructions.MountTypeSSH { - ssh, err := dispatchSSH(mount) - if err != nil { - return nil, err - } - out = append(out, ssh) - continue - } - if mount.ReadOnly { - mountOpts = append(mountOpts, llb.Readonly) - } else if mount.Type == instructions.MountTypeBind && opt.llbCaps.Supports(pb.CapExecMountBindReadWriteNoOuput) == nil { - mountOpts = append(mountOpts, llb.ForceNoOutput) - } - if mount.Type == instructions.MountTypeCache { - sharing := llb.CacheMountShared - if mount.CacheSharing == instructions.MountSharingPrivate { - sharing = llb.CacheMountPrivate - } - if mount.CacheSharing == instructions.MountSharingLocked { - sharing = llb.CacheMountLocked - } - if mount.CacheID == "" { - mount.CacheID = path.Clean(mount.Target) - } - mountOpts = append(mountOpts, llb.AsPersistentCacheDir(opt.cacheIDNamespace+"/"+mount.CacheID, sharing)) - } - target := mount.Target - if !filepath.IsAbs(filepath.Clean(mount.Target)) { - target = filepath.Join("/", d.state.GetDir(), mount.Target) - } - if target == "/" { - return nil, errors.Errorf("invalid mount target %q", target) - } - if src := path.Join("/", mount.Source); src != "/" { - mountOpts = append(mountOpts, llb.SourcePath(src)) - } else { - if mount.UID 
!= nil || mount.GID != nil || mount.Mode != nil { - st = setCacheUIDGID(mount, st, useFileOp(opt.buildArgValues, opt.llbCaps)) - mountOpts = append(mountOpts, llb.SourcePath("/cache")) - } - } - - out = append(out, llb.AddMount(target, st, mountOpts...)) - - if mount.From == "" { - d.ctxPaths[path.Join("/", filepath.ToSlash(mount.Source))] = struct{}{} - } - } - return out, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runnetwork.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runnetwork.go deleted file mode 100644 index 01313e23..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runnetwork.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build dfrunnetwork - -package dockerfile2llb - -import ( - "github.com/pkg/errors" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/moby/buildkit/solver/pb" -) - -func dispatchRunNetwork(c *instructions.RunCommand) (llb.RunOption, error) { - network := instructions.GetNetwork(c) - - switch network { - case instructions.NetworkDefault: - return nil, nil - case instructions.NetworkNone: - return llb.Network(pb.NetMode_NONE), nil - case instructions.NetworkHost: - return llb.Network(pb.NetMode_HOST), nil - default: - return nil, errors.Errorf("unsupported network mode %q", network) - } -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go deleted file mode 100644 index 764424a2..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build dfrunsecurity - -package dockerfile2llb - -import ( - "github.com/pkg/errors" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/moby/buildkit/solver/pb" -) - -func dispatchRunSecurity(c *instructions.RunCommand) (llb.RunOption, error) { - security := instructions.GetSecurity(c) - - switch security { - case instructions.SecurityInsecure: - return llb.Security(pb.SecurityMode_INSECURE), nil - case instructions.SecuritySandbox: - return llb.Security(pb.SecurityMode_SANDBOX), nil - default: - return nil, errors.Errorf("unsupported security mode %q", security) - } -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go deleted file mode 100644 index 59c055a0..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build dfsecrets - -package dockerfile2llb - -import ( - "path" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/pkg/errors" -) - -func dispatchSecret(m *instructions.Mount) (llb.RunOption, error) { - id := m.CacheID - if m.Source != "" { - id = m.Source - } - - if id == "" { - if m.Target == "" { - return nil, errors.Errorf("one of source, target required") - } - id = path.Base(m.Target) - } - - target := m.Target - if target == "" { - target = "/run/secrets/" + path.Base(id) - } - - opts := []llb.SecretOption{llb.SecretID(id)} - - if !m.Required { - opts = append(opts, llb.SecretOptional) - } - - if m.UID != nil || m.GID != nil || m.Mode != nil { - var uid, gid, mode int 
- if m.UID != nil { - uid = int(*m.UID) - } - if m.GID != nil { - gid = int(*m.GID) - } - if m.Mode != nil { - mode = int(*m.Mode) - } else { - mode = 0400 - } - opts = append(opts, llb.SecretFileOpt(uid, gid, mode)) - } - - return llb.AddSecret(target, opts...), nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go deleted file mode 100644 index a29b5350..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build dfssh - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/pkg/errors" -) - -func dispatchSSH(m *instructions.Mount) (llb.RunOption, error) { - if m.Source != "" { - return nil, errors.Errorf("ssh does not support source") - } - opts := []llb.SSHOption{llb.SSHID(m.CacheID)} - - if m.Target != "" { - opts = append(opts, llb.SSHSocketTarget(m.Target)) - } - - if !m.Required { - opts = append(opts, llb.SSHOptional) - } - - if m.UID != nil || m.GID != nil || m.Mode != nil { - var uid, gid, mode int - if m.UID != nil { - uid = int(*m.UID) - } - if m.GID != nil { - gid = int(*m.GID) - } - if m.Mode != nil { - mode = int(*m.Mode) - } else { - mode = 0600 - } - opts = append(opts, llb.SSHSocketOpt(m.Target, uid, gid, mode)) - } - - return llb.AddSSHSocket(opts...), nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go deleted file mode 100644 index b5d541d1..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package dockerfile2llb - -func defaultShell() []string { - return []string{"/bin/sh", "-c"} -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go deleted file mode 100644 index 7693e050..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build windows - -package dockerfile2llb - -func defaultShell() []string { - return []string{"cmd", "/S", "/C"} -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go deleted file mode 100644 index cf06b5ad..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go +++ /dev/null @@ -1,38 +0,0 @@ -package dockerfile2llb - -import ( - "bufio" - "io" - "regexp" - "strings" -) - -const keySyntax = "syntax" - -var reDirective = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`) - -func DetectSyntax(r io.Reader) (string, string, bool) { - directives := ParseDirectives(r) - if len(directives) == 0 { - return "", "", false - } - v, ok := directives[keySyntax] - if !ok { - return "", "", false - } - p := strings.SplitN(v, " ", 2) - return p[0], v, true -} - -func ParseDirectives(r io.Reader) map[string]string { - m := map[string]string{} - s := bufio.NewScanner(r) - for s.Scan() { - match := reDirective.FindStringSubmatch(s.Text()) - if len(match) == 0 { - return m - } - m[strings.ToLower(match[1])] = 
match[2] - } - return m -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go deleted file mode 100644 index 55e9add2..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go +++ /dev/null @@ -1,79 +0,0 @@ -package dockerfile2llb - -import ( - "time" - - "github.com/docker/docker/api/types/strslice" - "github.com/moby/buildkit/util/system" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// ImageConfig is a docker compatible config for an image -type ImageConfig struct { - specs.ImageConfig - - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - - // NetworkDisabled bool `json:",omitempty"` // Is network disabled - // MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - -// Image is the JSON structure which describes some basic information about the image. -// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. -type Image struct { - specs.Image - - // Config defines the execution parameters which should be used as a base when running a container using the image. - Config ImageConfig `json:"config,omitempty"` - - // Variant defines platform variant. To be added to OCI. - Variant string `json:"variant,omitempty"` -} - -func clone(src Image) Image { - img := src - img.Config = src.Config - img.Config.Env = append([]string{}, src.Config.Env...) - img.Config.Cmd = append([]string{}, src.Config.Cmd...) - img.Config.Entrypoint = append([]string{}, src.Config.Entrypoint...) 
- return img -} - -func emptyImage(platform specs.Platform) Image { - img := Image{ - Image: specs.Image{ - Architecture: platform.Architecture, - OS: platform.OS, - }, - Variant: platform.Variant, - } - img.RootFS.Type = "layers" - img.Config.WorkingDir = "/" - img.Config.Env = []string{"PATH=" + system.DefaultPathEnv} - return img -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go deleted file mode 100644 index e1ef78f8..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go +++ /dev/null @@ -1,58 +0,0 @@ -package dockerfile2llb - -import ( - "github.com/containerd/containerd/platforms" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -type platformOpt struct { - targetPlatform specs.Platform - buildPlatforms []specs.Platform - implicitTarget bool -} - -func buildPlatformOpt(opt *ConvertOpt) *platformOpt { - buildPlatforms := opt.BuildPlatforms - targetPlatform := opt.TargetPlatform - implicitTargetPlatform := false - - if opt.TargetPlatform != nil && opt.BuildPlatforms == nil { - buildPlatforms = []specs.Platform{*opt.TargetPlatform} - } - if len(buildPlatforms) == 0 { - buildPlatforms = []specs.Platform{platforms.DefaultSpec()} - } - - if opt.TargetPlatform == nil { - implicitTargetPlatform = true - targetPlatform = &buildPlatforms[0] - } - - return &platformOpt{ - targetPlatform: *targetPlatform, - buildPlatforms: buildPlatforms, - implicitTarget: implicitTargetPlatform, - } -} - -func getPlatformArgs(po *platformOpt) []instructions.KeyValuePairOptional { - bp := po.buildPlatforms[0] - tp := po.targetPlatform - m := map[string]string{ - "BUILDPLATFORM": platforms.Format(bp), - "BUILDOS": bp.OS, - "BUILDARCH": bp.Architecture, - "BUILDVARIANT": bp.Variant, - "TARGETPLATFORM": platforms.Format(tp), - "TARGETOS": tp.OS, - "TARGETARCH": tp.Architecture, - "TARGETVARIANT": tp.Variant, - } - opts := make([]instructions.KeyValuePairOptional, 0, len(m)) - for k, v := range m { - s := v - opts = append(opts, instructions.KeyValuePairOptional{Key: k, Value: &s}) - } - return opts -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go deleted file mode 100644 index d8bf7473..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go +++ /dev/null @@ -1,200 +0,0 @@ -package instructions - -import ( - "fmt" - "strings" -) - -// FlagType is the type of the build flag -type FlagType int - -const ( - boolType FlagType = iota - stringType - stringsType -) - -// BFlags contains all flags information for the builder -type BFlags struct { - Args []string // actual flags/args from cmd line - flags map[string]*Flag - used map[string]*Flag - Err error -} - -// Flag contains all information for a flag -type Flag struct { - bf *BFlags - name string - flagType FlagType - Value string - StringValues []string -} - -// NewBFlags returns the new BFlags struct -func NewBFlags() *BFlags { - return &BFlags{ - flags: make(map[string]*Flag), - used: make(map[string]*Flag), - } -} - -// NewBFlagsWithArgs returns the new BFlags struct with Args set to args -func NewBFlagsWithArgs(args []string) *BFlags { - flags := NewBFlags() - flags.Args = args - return flags -} - -// AddBool adds a bool flag to BFlags -// Note, any error will be generated when 
Parse() is called (see Parse). -func (bf *BFlags) AddBool(name string, def bool) *Flag { - flag := bf.addFlag(name, boolType) - if flag == nil { - return nil - } - if def { - flag.Value = "true" - } else { - flag.Value = "false" - } - return flag -} - -// AddString adds a string flag to BFlags -// Note, any error will be generated when Parse() is called (see Parse). -func (bf *BFlags) AddString(name string, def string) *Flag { - flag := bf.addFlag(name, stringType) - if flag == nil { - return nil - } - flag.Value = def - return flag -} - -// AddStrings adds a string flag to BFlags that can match multiple values -func (bf *BFlags) AddStrings(name string) *Flag { - flag := bf.addFlag(name, stringsType) - if flag == nil { - return nil - } - return flag -} - -// addFlag is a generic func used by the other AddXXX() func -// to add a new flag to the BFlags struct. -// Note, any error will be generated when Parse() is called (see Parse). -func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { - if _, ok := bf.flags[name]; ok { - bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) - return nil - } - - newFlag := &Flag{ - bf: bf, - name: name, - flagType: flagType, - } - bf.flags[name] = newFlag - - return newFlag -} - -// IsUsed checks if the flag is used -func (fl *Flag) IsUsed() bool { - if _, ok := fl.bf.used[fl.name]; ok { - return true - } - return false -} - -// IsTrue checks if a bool flag is true -func (fl *Flag) IsTrue() bool { - if fl.flagType != boolType { - // Should never get here - panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) - } - return fl.Value == "true" -} - -// Parse parses and checks if the BFlags is valid. -// Any error noticed during the AddXXX() funcs will be generated/returned -// here. We do this because an error during AddXXX() is more like a -// compile time error so it doesn't matter too much when we stop our -// processing as long as we do stop it, so this allows the code -// around AddXXX() to be just: -// defFlag := AddString("description", "") -// w/o needing to add an if-statement around each one. 
-func (bf *BFlags) Parse() error { - // If there was an error while defining the possible flags - // go ahead and bubble it back up here since we didn't do it - // earlier in the processing - if bf.Err != nil { - return fmt.Errorf("Error setting up flags: %s", bf.Err) - } - - for _, arg := range bf.Args { - if !strings.HasPrefix(arg, "--") { - return fmt.Errorf("Arg should start with -- : %s", arg) - } - - if arg == "--" { - return nil - } - - arg = arg[2:] - value := "" - - index := strings.Index(arg, "=") - if index >= 0 { - value = arg[index+1:] - arg = arg[:index] - } - - flag, ok := bf.flags[arg] - if !ok { - return fmt.Errorf("Unknown flag: %s", arg) - } - - if _, ok = bf.used[arg]; ok && flag.flagType != stringsType { - return fmt.Errorf("Duplicate flag specified: %s", arg) - } - - bf.used[arg] = flag - - switch flag.flagType { - case boolType: - // value == "" is only ok if no "=" was specified - if index >= 0 && value == "" { - return fmt.Errorf("Missing a value on flag: %s", arg) - } - - lower := strings.ToLower(value) - if lower == "" { - flag.Value = "true" - } else if lower == "true" || lower == "false" { - flag.Value = lower - } else { - return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) - } - - case stringType: - if index < 0 { - return fmt.Errorf("Missing a value on flag: %s", arg) - } - flag.Value = value - - case stringsType: - if index < 0 { - return fmt.Errorf("Missing a value on flag: %s", arg) - } - flag.StringValues = append(flag.StringValues, value) - - default: - panic("No idea what kind of flag we have! Should never get here!") - } - - } - - return nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go deleted file mode 100644 index ed96d7e0..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go +++ /dev/null @@ -1,451 +0,0 @@ -package instructions - -import ( - "errors" - "strings" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/strslice" -) - -// KeyValuePair represent an arbitrary named value (useful in slice instead of map[string] string to preserve ordering) -type KeyValuePair struct { - Key string - Value string -} - -func (kvp *KeyValuePair) String() string { - return kvp.Key + "=" + kvp.Value -} - -// KeyValuePairOptional is the same as KeyValuePair but Value is optional -type KeyValuePairOptional struct { - Key string - Value *string -} - -func (kvpo *KeyValuePairOptional) ValueString() string { - v := "" - if kvpo.Value != nil { - v = *kvpo.Value - } - return v -} - -// Command is implemented by every command present in a dockerfile -type Command interface { - Name() string -} - -// KeyValuePairs is a slice of KeyValuePair -type KeyValuePairs []KeyValuePair - -// withNameAndCode is the base of every command in a Dockerfile (String() returns its source code) -type withNameAndCode struct { - code string - name string -} - -func (c *withNameAndCode) String() string { - return c.code -} - -// Name of the command -func (c *withNameAndCode) Name() string { - return c.name -} - -func newWithNameAndCode(req parseRequest) withNameAndCode { - return withNameAndCode{code: strings.TrimSpace(req.original), name: req.command} -} - -// SingleWordExpander is a provider for variable expansion where 1 word => 1 output -type SingleWordExpander func(word string) (string, error) - -// SupportsSingleWordExpansion interface marks a command as supporting 
variable expansion -type SupportsSingleWordExpansion interface { - Expand(expander SingleWordExpander) error -} - -// PlatformSpecific adds platform checks to a command -type PlatformSpecific interface { - CheckPlatform(platform string) error -} - -func expandKvp(kvp KeyValuePair, expander SingleWordExpander) (KeyValuePair, error) { - key, err := expander(kvp.Key) - if err != nil { - return KeyValuePair{}, err - } - value, err := expander(kvp.Value) - if err != nil { - return KeyValuePair{}, err - } - return KeyValuePair{Key: key, Value: value}, nil -} -func expandKvpsInPlace(kvps KeyValuePairs, expander SingleWordExpander) error { - for i, kvp := range kvps { - newKvp, err := expandKvp(kvp, expander) - if err != nil { - return err - } - kvps[i] = newKvp - } - return nil -} - -func expandSliceInPlace(values []string, expander SingleWordExpander) error { - for i, v := range values { - newValue, err := expander(v) - if err != nil { - return err - } - values[i] = newValue - } - return nil -} - -// EnvCommand : ENV key1 value1 [keyN valueN...] -type EnvCommand struct { - withNameAndCode - Env KeyValuePairs // kvp slice instead of map to preserve ordering -} - -// Expand variables -func (c *EnvCommand) Expand(expander SingleWordExpander) error { - return expandKvpsInPlace(c.Env, expander) -} - -// MaintainerCommand : MAINTAINER maintainer_name -type MaintainerCommand struct { - withNameAndCode - Maintainer string -} - -// NewLabelCommand creates a new 'LABEL' command -func NewLabelCommand(k string, v string, NoExp bool) *LabelCommand { - kvp := KeyValuePair{Key: k, Value: v} - c := "LABEL " - c += kvp.String() - nc := withNameAndCode{code: c, name: "label"} - cmd := &LabelCommand{ - withNameAndCode: nc, - Labels: KeyValuePairs{ - kvp, - }, - noExpand: NoExp, - } - return cmd -} - -// LabelCommand : LABEL some json data describing the image -// -// Sets the Label variable foo to bar, -// -type LabelCommand struct { - withNameAndCode - Labels KeyValuePairs // kvp slice instead of map to preserve ordering - noExpand bool -} - -// Expand variables -func (c *LabelCommand) Expand(expander SingleWordExpander) error { - if c.noExpand { - return nil - } - return expandKvpsInPlace(c.Labels, expander) -} - -// SourcesAndDest represent a list of source files and a destination -type SourcesAndDest []string - -// Sources list the source paths -func (s SourcesAndDest) Sources() []string { - res := make([]string, len(s)-1) - copy(res, s[:len(s)-1]) - return res -} - -// Dest path of the operation -func (s SourcesAndDest) Dest() string { - return s[len(s)-1] -} - -// AddCommand : ADD foo /path -// -// Add the file 'foo' to '/path'. Tarball and Remote URL (http, https) handling -// exist here. If you do not wish to have this automatic handling, use COPY. -// -type AddCommand struct { - withNameAndCode - SourcesAndDest - Chown string -} - -// Expand variables -func (c *AddCommand) Expand(expander SingleWordExpander) error { - return expandSliceInPlace(c.SourcesAndDest, expander) -} - -// CopyCommand : COPY foo /path -// -// Same as 'ADD' but without the tar and remote url handling. 
-// -type CopyCommand struct { - withNameAndCode - SourcesAndDest - From string - Chown string -} - -// Expand variables -func (c *CopyCommand) Expand(expander SingleWordExpander) error { - expandedChown, err := expander(c.Chown) - if err != nil { - return err - } - c.Chown = expandedChown - return expandSliceInPlace(c.SourcesAndDest, expander) -} - -// OnbuildCommand : ONBUILD -type OnbuildCommand struct { - withNameAndCode - Expression string -} - -// WorkdirCommand : WORKDIR /tmp -// -// Set the working directory for future RUN/CMD/etc statements. -// -type WorkdirCommand struct { - withNameAndCode - Path string -} - -// Expand variables -func (c *WorkdirCommand) Expand(expander SingleWordExpander) error { - p, err := expander(c.Path) - if err != nil { - return err - } - c.Path = p - return nil -} - -// ShellDependantCmdLine represents a cmdline optionally prepended with the shell -type ShellDependantCmdLine struct { - CmdLine strslice.StrSlice - PrependShell bool -} - -// RunCommand : RUN some command yo -// -// run a command and commit the image. Args are automatically prepended with -// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under -// Windows, in the event there is only one argument The difference in processing: -// -// RUN echo hi # sh -c echo hi (Linux) -// RUN echo hi # cmd /S /C echo hi (Windows) -// RUN [ "echo", "hi" ] # echo hi -// -type RunCommand struct { - withNameAndCode - withExternalData - ShellDependantCmdLine -} - -// CmdCommand : CMD foo -// -// Set the default command to run in the container (which may be empty). -// Argument handling is the same as RUN. -// -type CmdCommand struct { - withNameAndCode - ShellDependantCmdLine -} - -// HealthCheckCommand : HEALTHCHECK foo -// -// Set the default healthcheck command to run in the container (which may be empty). -// Argument handling is the same as RUN. -// -type HealthCheckCommand struct { - withNameAndCode - Health *container.HealthConfig -} - -// EntrypointCommand : ENTRYPOINT /usr/sbin/nginx -// -// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments -// to /usr/sbin/nginx. Uses the default shell if not in JSON format. -// -// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint -// is initialized at newBuilder time instead of through argument parsing. -// -type EntrypointCommand struct { - withNameAndCode - ShellDependantCmdLine -} - -// ExposeCommand : EXPOSE 6667/tcp 7000/tcp -// -// Expose ports for links and port mappings. This all ends up in -// req.runConfig.ExposedPorts for runconfig. -// -type ExposeCommand struct { - withNameAndCode - Ports []string -} - -// UserCommand : USER foo -// -// Set the user to 'foo' for future commands and when running the -// ENTRYPOINT/CMD at container run time. -// -type UserCommand struct { - withNameAndCode - User string -} - -// Expand variables -func (c *UserCommand) Expand(expander SingleWordExpander) error { - p, err := expander(c.User) - if err != nil { - return err - } - c.User = p - return nil -} - -// VolumeCommand : VOLUME /foo -// -// Expose the volume /foo for use. Will also accept the JSON array form. -// -type VolumeCommand struct { - withNameAndCode - Volumes []string -} - -// Expand variables -func (c *VolumeCommand) Expand(expander SingleWordExpander) error { - return expandSliceInPlace(c.Volumes, expander) -} - -// StopSignalCommand : STOPSIGNAL signal -// -// Set the signal that will be used to kill the container. 
-type StopSignalCommand struct { - withNameAndCode - Signal string -} - -// Expand variables -func (c *StopSignalCommand) Expand(expander SingleWordExpander) error { - p, err := expander(c.Signal) - if err != nil { - return err - } - c.Signal = p - return nil -} - -// CheckPlatform checks that the command is supported in the target platform -func (c *StopSignalCommand) CheckPlatform(platform string) error { - if platform == "windows" { - return errors.New("The daemon on this platform does not support the command stopsignal") - } - return nil -} - -// ArgCommand : ARG name[=value] -// -// Adds the variable foo to the trusted list of variables that can be passed -// to builder using the --build-arg flag for expansion/substitution or passing to 'run'. -// Dockerfile author may optionally set a default value of this variable. -type ArgCommand struct { - withNameAndCode - KeyValuePairOptional -} - -// Expand variables -func (c *ArgCommand) Expand(expander SingleWordExpander) error { - p, err := expander(c.Key) - if err != nil { - return err - } - c.Key = p - if c.Value != nil { - p, err = expander(*c.Value) - if err != nil { - return err - } - c.Value = &p - } - return nil -} - -// ShellCommand : SHELL powershell -command -// -// Set the non-default shell to use. -type ShellCommand struct { - withNameAndCode - Shell strslice.StrSlice -} - -// Stage represents a single stage in a multi-stage build -type Stage struct { - Name string - Commands []Command - BaseName string - SourceCode string - Platform string -} - -// AddCommand to the stage -func (s *Stage) AddCommand(cmd Command) { - // todo: validate cmd type - s.Commands = append(s.Commands, cmd) -} - -// IsCurrentStage check if the stage name is the current stage -func IsCurrentStage(s []Stage, name string) bool { - if len(s) == 0 { - return false - } - return s[len(s)-1].Name == name -} - -// CurrentStage return the last stage in a slice -func CurrentStage(s []Stage) (*Stage, error) { - if len(s) == 0 { - return nil, errors.New("No build stage in current context") - } - return &s[len(s)-1], nil -} - -// HasStage looks for the presence of a given stage name -func HasStage(s []Stage, name string) (int, bool) { - for i, stage := range s { - // Stage name is case-insensitive by design - if strings.EqualFold(stage.Name, name) { - return i, true - } - } - return -1, false -} - -type withExternalData struct { - m map[interface{}]interface{} -} - -func (c *withExternalData) getExternalValue(k interface{}) interface{} { - return c.m[k] -} - -func (c *withExternalData) setExternalValue(k, v interface{}) { - if c.m == nil { - c.m = map[interface{}]interface{}{} - } - c.m[k] = v -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go deleted file mode 100644 index 58780648..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !dfsecrets - -package instructions - -func isSecretMountsSupported() bool { - return false -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go deleted file mode 100644 index a131a273..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !dfssh - -package instructions - -func 
isSSHMountsSupported() bool { - return false -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go deleted file mode 100644 index 442877d8..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go +++ /dev/null @@ -1,263 +0,0 @@ -// +build dfrunmount - -package instructions - -import ( - "encoding/csv" - "strconv" - "strings" - - "github.com/pkg/errors" -) - -const MountTypeBind = "bind" -const MountTypeCache = "cache" -const MountTypeTmpfs = "tmpfs" -const MountTypeSecret = "secret" -const MountTypeSSH = "ssh" - -var allowedMountTypes = map[string]struct{}{ - MountTypeBind: {}, - MountTypeCache: {}, - MountTypeTmpfs: {}, - MountTypeSecret: {}, - MountTypeSSH: {}, -} - -const MountSharingShared = "shared" -const MountSharingPrivate = "private" -const MountSharingLocked = "locked" - -var allowedSharingTypes = map[string]struct{}{ - MountSharingShared: {}, - MountSharingPrivate: {}, - MountSharingLocked: {}, -} - -type mountsKeyT string - -var mountsKey = mountsKeyT("dockerfile/run/mounts") - -func init() { - parseRunPreHooks = append(parseRunPreHooks, runMountPreHook) - parseRunPostHooks = append(parseRunPostHooks, runMountPostHook) -} - -func isValidMountType(s string) bool { - if s == "secret" { - if !isSecretMountsSupported() { - return false - } - } - if s == "ssh" { - if !isSSHMountsSupported() { - return false - } - } - _, ok := allowedMountTypes[s] - return ok -} - -func runMountPreHook(cmd *RunCommand, req parseRequest) error { - st := &mountState{} - st.flag = req.flags.AddStrings("mount") - cmd.setExternalValue(mountsKey, st) - return nil -} - -func runMountPostHook(cmd *RunCommand, req parseRequest) error { - st := getMountState(cmd) - if st == nil { - return errors.Errorf("no mount state") - } - var mounts []*Mount - for _, str := range st.flag.StringValues { - m, err := parseMount(str) - if err != nil { - return err - } - mounts = append(mounts, m) - } - st.mounts = mounts - return nil -} - -func getMountState(cmd *RunCommand) *mountState { - v := cmd.getExternalValue(mountsKey) - if v == nil { - return nil - } - return v.(*mountState) -} - -func GetMounts(cmd *RunCommand) []*Mount { - return getMountState(cmd).mounts -} - -type mountState struct { - flag *Flag - mounts []*Mount -} - -type Mount struct { - Type string - From string - Source string - Target string - ReadOnly bool - CacheID string - CacheSharing string - Required bool - Mode *uint64 - UID *uint64 - GID *uint64 -} - -func parseMount(value string) (*Mount, error) { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return nil, errors.Wrap(err, "failed to parse csv mounts") - } - - m := &Mount{Type: MountTypeBind} - - roAuto := true - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) == 1 { - switch key { - case "readonly", "ro": - m.ReadOnly = true - roAuto = false - continue - case "readwrite", "rw": - m.ReadOnly = false - roAuto = false - continue - case "required": - if m.Type == "secret" || m.Type == "ssh" { - m.Required = true - continue - } else { - return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) - } - } - } - - if len(parts) != 2 { - return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case 
"type": - if !isValidMountType(strings.ToLower(value)) { - return nil, errors.Errorf("unsupported mount type %q", value) - } - m.Type = strings.ToLower(value) - case "from": - m.From = value - case "source", "src": - m.Source = value - case "target", "dst", "destination": - m.Target = value - case "readonly", "ro": - m.ReadOnly, err = strconv.ParseBool(value) - if err != nil { - return nil, errors.Errorf("invalid value for %s: %s", key, value) - } - roAuto = false - case "readwrite", "rw": - rw, err := strconv.ParseBool(value) - if err != nil { - return nil, errors.Errorf("invalid value for %s: %s", key, value) - } - m.ReadOnly = !rw - roAuto = false - case "required": - if m.Type == "secret" || m.Type == "ssh" { - v, err := strconv.ParseBool(value) - if err != nil { - return nil, errors.Errorf("invalid value for %s: %s", key, value) - } - m.Required = v - } else { - return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) - } - case "id": - m.CacheID = value - case "sharing": - if _, ok := allowedSharingTypes[strings.ToLower(value)]; !ok { - return nil, errors.Errorf("unsupported sharing value %q", value) - } - m.CacheSharing = strings.ToLower(value) - case "mode": - mode, err := strconv.ParseUint(value, 8, 32) - if err != nil { - return nil, errors.Errorf("invalid value %s for mode", value) - } - m.Mode = &mode - case "uid": - uid, err := strconv.ParseUint(value, 10, 32) - if err != nil { - return nil, errors.Errorf("invalid value %s for uid", value) - } - m.UID = &uid - case "gid": - gid, err := strconv.ParseUint(value, 10, 32) - if err != nil { - return nil, errors.Errorf("invalid value %s for gid", value) - } - m.GID = &gid - default: - return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field) - } - } - - fileInfoAllowed := m.Type == MountTypeSecret || m.Type == MountTypeSSH || m.Type == MountTypeCache - - if m.Mode != nil && !fileInfoAllowed { - return nil, errors.Errorf("mode not allowed for %q type mounts", m.Type) - } - - if m.UID != nil && !fileInfoAllowed { - return nil, errors.Errorf("uid not allowed for %q type mounts", m.Type) - } - - if m.GID != nil && !fileInfoAllowed { - return nil, errors.Errorf("gid not allowed for %q type mounts", m.Type) - } - - if roAuto { - if m.Type == MountTypeCache || m.Type == MountTypeTmpfs { - m.ReadOnly = false - } else { - m.ReadOnly = true - } - } - - if m.CacheSharing != "" && m.Type != MountTypeCache { - return nil, errors.Errorf("invalid cache sharing set for %v mount", m.Type) - } - - if m.Type == MountTypeSecret { - if m.From != "" { - return nil, errors.Errorf("secret mount should not have a from") - } - if m.CacheSharing != "" { - return nil, errors.Errorf("secret mount should not define sharing") - } - if m.Source == "" && m.Target == "" && m.CacheID == "" { - return nil, errors.Errorf("invalid secret mount. 
one of source, target required") - } - if m.Source != "" && m.CacheID != "" { - return nil, errors.Errorf("both source and id can't be set") - } - } - - return m, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go deleted file mode 100644 index adef3fd7..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build dfrunnetwork - -package instructions - -import ( - "github.com/pkg/errors" -) - -const ( - NetworkDefault = "default" - NetworkNone = "none" - NetworkHost = "host" -) - -var allowedNetwork = map[string]struct{}{ - NetworkDefault: {}, - NetworkNone: {}, - NetworkHost: {}, -} - -func isValidNetwork(value string) bool { - _, ok := allowedNetwork[value] - return ok -} - -var networkKey = "dockerfile/run/network" - -func init() { - parseRunPreHooks = append(parseRunPreHooks, runNetworkPreHook) - parseRunPostHooks = append(parseRunPostHooks, runNetworkPostHook) -} - -func runNetworkPreHook(cmd *RunCommand, req parseRequest) error { - st := &networkState{} - st.flag = req.flags.AddString("network", NetworkDefault) - cmd.setExternalValue(networkKey, st) - return nil -} - -func runNetworkPostHook(cmd *RunCommand, req parseRequest) error { - st := cmd.getExternalValue(networkKey).(*networkState) - if st == nil { - return errors.Errorf("no network state") - } - - value := st.flag.Value - if !isValidNetwork(value) { - return errors.Errorf("invalid network mode %q", value) - } - - st.networkMode = value - - return nil -} - -func GetNetwork(cmd *RunCommand) string { - return cmd.getExternalValue(networkKey).(*networkState).networkMode -} - -type networkState struct { - flag *Flag - networkMode string -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go deleted file mode 100644 index 0c0be806..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build dfrunsecurity - -package instructions - -import ( - "github.com/pkg/errors" -) - -const ( - SecurityInsecure = "insecure" - SecuritySandbox = "sandbox" -) - -var allowedSecurity = map[string]struct{}{ - SecurityInsecure: {}, - SecuritySandbox: {}, -} - -func isValidSecurity(value string) bool { - _, ok := allowedSecurity[value] - return ok -} - -var securityKey = "dockerfile/run/security" - -func init() { - parseRunPreHooks = append(parseRunPreHooks, runSecurityPreHook) - parseRunPostHooks = append(parseRunPostHooks, runSecurityPostHook) -} - -func runSecurityPreHook(cmd *RunCommand, req parseRequest) error { - st := &securityState{} - st.flag = req.flags.AddString("security", SecuritySandbox) - cmd.setExternalValue(securityKey, st) - return nil -} - -func runSecurityPostHook(cmd *RunCommand, req parseRequest) error { - st := cmd.getExternalValue(securityKey).(*securityState) - if st == nil { - return errors.Errorf("no security state") - } - - value := st.flag.Value - if !isValidSecurity(value) { - return errors.Errorf("security %q is not valid", value) - } - - st.security = value - - return nil -} - -func GetSecurity(cmd *RunCommand) string { - return cmd.getExternalValue(securityKey).(*securityState).security -} - -type securityState struct { - flag *Flag - security string -} diff --git 
a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go deleted file mode 100644 index 6cce1191..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build dfsecrets - -package instructions - -func isSecretMountsSupported() bool { - return true -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go deleted file mode 100644 index 0b94a564..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build dfssh - -package instructions - -func isSSHMountsSupported() bool { - return true -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go deleted file mode 100644 index 0b03b34c..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !windows - -package instructions - -import "fmt" - -func errNotJSON(command, _ string) error { - return fmt.Errorf("%s requires the arguments to be in JSON form", command) -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go deleted file mode 100644 index a4843c5b..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -package instructions - -import ( - "fmt" - "path/filepath" - "regexp" - "strings" -) - -func errNotJSON(command, original string) error { - // For Windows users, give a hint if it looks like it might contain - // a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"], - // as JSON must be escaped. Unfortunate... - // - // Specifically looking for quote-driveletter-colon-backslash, there's no - // double backslash and a [] pair. No, this is not perfect, but it doesn't - // have to be. It's simply a hint to make life a little easier. - extra := "" - original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1))) - if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 && - !strings.Contains(original, `\\`) && - strings.Contains(original, "[") && - strings.Contains(original, "]") { - extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. 
JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original) - } - return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra) -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go deleted file mode 100644 index 5dee06f1..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go +++ /dev/null @@ -1,650 +0,0 @@ -package instructions - -import ( - "fmt" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/strslice" - "github.com/moby/buildkit/frontend/dockerfile/command" - "github.com/moby/buildkit/frontend/dockerfile/parser" - "github.com/pkg/errors" -) - -type parseRequest struct { - command string - args []string - attributes map[string]bool - flags *BFlags - original string -} - -var parseRunPreHooks []func(*RunCommand, parseRequest) error -var parseRunPostHooks []func(*RunCommand, parseRequest) error - -func nodeArgs(node *parser.Node) []string { - result := []string{} - for ; node.Next != nil; node = node.Next { - arg := node.Next - if len(arg.Children) == 0 { - result = append(result, arg.Value) - } else if len(arg.Children) == 1 { - //sub command - result = append(result, arg.Children[0].Value) - result = append(result, nodeArgs(arg.Children[0])...) - } - } - return result -} - -func newParseRequestFromNode(node *parser.Node) parseRequest { - return parseRequest{ - command: node.Value, - args: nodeArgs(node), - attributes: node.Attributes, - original: node.Original, - flags: NewBFlagsWithArgs(node.Flags), - } -} - -// ParseInstruction converts an AST to a typed instruction (either a command or a build stage beginning when encountering a `FROM` statement) -func ParseInstruction(node *parser.Node) (interface{}, error) { - req := newParseRequestFromNode(node) - switch node.Value { - case command.Env: - return parseEnv(req) - case command.Maintainer: - return parseMaintainer(req) - case command.Label: - return parseLabel(req) - case command.Add: - return parseAdd(req) - case command.Copy: - return parseCopy(req) - case command.From: - return parseFrom(req) - case command.Onbuild: - return parseOnBuild(req) - case command.Workdir: - return parseWorkdir(req) - case command.Run: - return parseRun(req) - case command.Cmd: - return parseCmd(req) - case command.Healthcheck: - return parseHealthcheck(req) - case command.Entrypoint: - return parseEntrypoint(req) - case command.Expose: - return parseExpose(req) - case command.User: - return parseUser(req) - case command.Volume: - return parseVolume(req) - case command.StopSignal: - return parseStopSignal(req) - case command.Arg: - return parseArg(req) - case command.Shell: - return parseShell(req) - } - - return nil, &UnknownInstruction{Instruction: node.Value, Line: node.StartLine} -} - -// ParseCommand converts an AST to a typed Command -func ParseCommand(node *parser.Node) (Command, error) { - s, err := ParseInstruction(node) - if err != nil { - return nil, err - } - if c, ok := s.(Command); ok { - return c, nil - } - return nil, errors.Errorf("%T is not a command type", s) -} - -// UnknownInstruction represents an error occurring when a command is unresolvable -type UnknownInstruction struct { - Line int - Instruction string -} - -func (e *UnknownInstruction) Error() string { - return fmt.Sprintf("unknown instruction: %s", 
strings.ToUpper(e.Instruction)) -} - -// IsUnknownInstruction checks if the error is an UnknownInstruction or a parseError containing an UnknownInstruction -func IsUnknownInstruction(err error) bool { - _, ok := err.(*UnknownInstruction) - if !ok { - var pe *parseError - if pe, ok = err.(*parseError); ok { - _, ok = pe.inner.(*UnknownInstruction) - } - } - return ok -} - -type parseError struct { - inner error - node *parser.Node -} - -func (e *parseError) Error() string { - return fmt.Sprintf("Dockerfile parse error line %d: %v", e.node.StartLine, e.inner.Error()) -} - -// Parse a Dockerfile into a collection of buildable stages. -// metaArgs is a collection of ARG instructions that occur before the first FROM. -func Parse(ast *parser.Node) (stages []Stage, metaArgs []ArgCommand, err error) { - for _, n := range ast.Children { - cmd, err := ParseInstruction(n) - if err != nil { - return nil, nil, &parseError{inner: err, node: n} - } - if len(stages) == 0 { - // meta arg case - if a, isArg := cmd.(*ArgCommand); isArg { - metaArgs = append(metaArgs, *a) - continue - } - } - switch c := cmd.(type) { - case *Stage: - stages = append(stages, *c) - case Command: - stage, err := CurrentStage(stages) - if err != nil { - return nil, nil, err - } - stage.AddCommand(c) - default: - return nil, nil, errors.Errorf("%T is not a command type", cmd) - } - - } - return stages, metaArgs, nil -} - -func parseKvps(args []string, cmdName string) (KeyValuePairs, error) { - if len(args) == 0 { - return nil, errAtLeastOneArgument(cmdName) - } - if len(args)%2 != 0 { - // should never get here, but just in case - return nil, errTooManyArguments(cmdName) - } - var res KeyValuePairs - for j := 0; j < len(args); j += 2 { - if len(args[j]) == 0 { - return nil, errBlankCommandNames(cmdName) - } - name := args[j] - value := args[j+1] - res = append(res, KeyValuePair{Key: name, Value: value}) - } - return res, nil -} - -func parseEnv(req parseRequest) (*EnvCommand, error) { - - if err := req.flags.Parse(); err != nil { - return nil, err - } - envs, err := parseKvps(req.args, "ENV") - if err != nil { - return nil, err - } - return &EnvCommand{ - Env: envs, - withNameAndCode: newWithNameAndCode(req), - }, nil -} - -func parseMaintainer(req parseRequest) (*MaintainerCommand, error) { - if len(req.args) != 1 { - return nil, errExactlyOneArgument("MAINTAINER") - } - - if err := req.flags.Parse(); err != nil { - return nil, err - } - return &MaintainerCommand{ - Maintainer: req.args[0], - withNameAndCode: newWithNameAndCode(req), - }, nil -} - -func parseLabel(req parseRequest) (*LabelCommand, error) { - - if err := req.flags.Parse(); err != nil { - return nil, err - } - - labels, err := parseKvps(req.args, "LABEL") - if err != nil { - return nil, err - } - - return &LabelCommand{ - Labels: labels, - withNameAndCode: newWithNameAndCode(req), - }, nil -} - -func parseAdd(req parseRequest) (*AddCommand, error) { - if len(req.args) < 2 { - return nil, errNoDestinationArgument("ADD") - } - flChown := req.flags.AddString("chown", "") - if err := req.flags.Parse(); err != nil { - return nil, err - } - return &AddCommand{ - SourcesAndDest: SourcesAndDest(req.args), - withNameAndCode: newWithNameAndCode(req), - Chown: flChown.Value, - }, nil -} - -func parseCopy(req parseRequest) (*CopyCommand, error) { - if len(req.args) < 2 { - return nil, errNoDestinationArgument("COPY") - } - flChown := req.flags.AddString("chown", "") - flFrom := req.flags.AddString("from", "") - if err := req.flags.Parse(); err != nil { - return nil, err - } - 
return &CopyCommand{ - SourcesAndDest: SourcesAndDest(req.args), - From: flFrom.Value, - withNameAndCode: newWithNameAndCode(req), - Chown: flChown.Value, - }, nil -} - -func parseFrom(req parseRequest) (*Stage, error) { - stageName, err := parseBuildStageName(req.args) - if err != nil { - return nil, err - } - - flPlatform := req.flags.AddString("platform", "") - if err := req.flags.Parse(); err != nil { - return nil, err - } - - code := strings.TrimSpace(req.original) - return &Stage{ - BaseName: req.args[0], - Name: stageName, - SourceCode: code, - Commands: []Command{}, - Platform: flPlatform.Value, - }, nil - -} - -func parseBuildStageName(args []string) (string, error) { - stageName := "" - switch { - case len(args) == 3 && strings.EqualFold(args[1], "as"): - stageName = strings.ToLower(args[2]) - if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok { - return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", args[2]) - } - case len(args) != 1: - return "", errors.New("FROM requires either one or three arguments") - } - - return stageName, nil -} - -func parseOnBuild(req parseRequest) (*OnbuildCommand, error) { - if len(req.args) == 0 { - return nil, errAtLeastOneArgument("ONBUILD") - } - if err := req.flags.Parse(); err != nil { - return nil, err - } - - triggerInstruction := strings.ToUpper(strings.TrimSpace(req.args[0])) - switch strings.ToUpper(triggerInstruction) { - case "ONBUILD": - return nil, errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return nil, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) - } - - original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "") - return &OnbuildCommand{ - Expression: original, - withNameAndCode: newWithNameAndCode(req), - }, nil - -} - -func parseWorkdir(req parseRequest) (*WorkdirCommand, error) { - if len(req.args) != 1 { - return nil, errExactlyOneArgument("WORKDIR") - } - - err := req.flags.Parse() - if err != nil { - return nil, err - } - return &WorkdirCommand{ - Path: req.args[0], - withNameAndCode: newWithNameAndCode(req), - }, nil - -} - -func parseShellDependentCommand(req parseRequest, emptyAsNil bool) ShellDependantCmdLine { - args := handleJSONArgs(req.args, req.attributes) - cmd := strslice.StrSlice(args) - if emptyAsNil && len(cmd) == 0 { - cmd = nil - } - return ShellDependantCmdLine{ - CmdLine: cmd, - PrependShell: !req.attributes["json"], - } -} - -func parseRun(req parseRequest) (*RunCommand, error) { - cmd := &RunCommand{} - - for _, fn := range parseRunPreHooks { - if err := fn(cmd, req); err != nil { - return nil, err - } - } - - if err := req.flags.Parse(); err != nil { - return nil, err - } - - cmd.ShellDependantCmdLine = parseShellDependentCommand(req, false) - cmd.withNameAndCode = newWithNameAndCode(req) - - for _, fn := range parseRunPostHooks { - if err := fn(cmd, req); err != nil { - return nil, err - } - } - - return cmd, nil -} - -func parseCmd(req parseRequest) (*CmdCommand, error) { - if err := req.flags.Parse(); err != nil { - return nil, err - } - return &CmdCommand{ - ShellDependantCmdLine: parseShellDependentCommand(req, false), - withNameAndCode: newWithNameAndCode(req), - }, nil - -} - -func parseEntrypoint(req parseRequest) (*EntrypointCommand, error) { - if err := req.flags.Parse(); err != nil { - return nil, err - } - - cmd := &EntrypointCommand{ - ShellDependantCmdLine: parseShellDependentCommand(req, true), - 
withNameAndCode: newWithNameAndCode(req), - } - - return cmd, nil -} - -// parseOptInterval(flag) is the duration of flag.Value, or 0 if -// empty. An error is reported if the value is given and less than minimum duration. -func parseOptInterval(f *Flag) (time.Duration, error) { - s := f.Value - if s == "" { - return 0, nil - } - d, err := time.ParseDuration(s) - if err != nil { - return 0, err - } - if d < container.MinimumDuration { - return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration) - } - return d, nil -} -func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { - if len(req.args) == 0 { - return nil, errAtLeastOneArgument("HEALTHCHECK") - } - cmd := &HealthCheckCommand{ - withNameAndCode: newWithNameAndCode(req), - } - - typ := strings.ToUpper(req.args[0]) - args := req.args[1:] - if typ == "NONE" { - if len(args) != 0 { - return nil, errors.New("HEALTHCHECK NONE takes no arguments") - } - test := strslice.StrSlice{typ} - cmd.Health = &container.HealthConfig{ - Test: test, - } - } else { - - healthcheck := container.HealthConfig{} - - flInterval := req.flags.AddString("interval", "") - flTimeout := req.flags.AddString("timeout", "") - flStartPeriod := req.flags.AddString("start-period", "") - flRetries := req.flags.AddString("retries", "") - - if err := req.flags.Parse(); err != nil { - return nil, err - } - - switch typ { - case "CMD": - cmdSlice := handleJSONArgs(args, req.attributes) - if len(cmdSlice) == 0 { - return nil, errors.New("Missing command after HEALTHCHECK CMD") - } - - if !req.attributes["json"] { - typ = "CMD-SHELL" - } - - healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...)) - default: - return nil, fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) - } - - interval, err := parseOptInterval(flInterval) - if err != nil { - return nil, err - } - healthcheck.Interval = interval - - timeout, err := parseOptInterval(flTimeout) - if err != nil { - return nil, err - } - healthcheck.Timeout = timeout - - startPeriod, err := parseOptInterval(flStartPeriod) - if err != nil { - return nil, err - } - healthcheck.StartPeriod = startPeriod - - if flRetries.Value != "" { - retries, err := strconv.ParseInt(flRetries.Value, 10, 32) - if err != nil { - return nil, err - } - if retries < 1 { - return nil, fmt.Errorf("--retries must be at least 1 (not %d)", retries) - } - healthcheck.Retries = int(retries) - } else { - healthcheck.Retries = 0 - } - - cmd.Health = &healthcheck - } - return cmd, nil -} - -func parseExpose(req parseRequest) (*ExposeCommand, error) { - portsTab := req.args - - if len(req.args) == 0 { - return nil, errAtLeastOneArgument("EXPOSE") - } - - if err := req.flags.Parse(); err != nil { - return nil, err - } - - sort.Strings(portsTab) - return &ExposeCommand{ - Ports: portsTab, - withNameAndCode: newWithNameAndCode(req), - }, nil -} - -func parseUser(req parseRequest) (*UserCommand, error) { - if len(req.args) != 1 { - return nil, errExactlyOneArgument("USER") - } - - if err := req.flags.Parse(); err != nil { - return nil, err - } - return &UserCommand{ - User: req.args[0], - withNameAndCode: newWithNameAndCode(req), - }, nil -} - -func parseVolume(req parseRequest) (*VolumeCommand, error) { - if len(req.args) == 0 { - return nil, errAtLeastOneArgument("VOLUME") - } - - if err := req.flags.Parse(); err != nil { - return nil, err - } - - cmd := &VolumeCommand{ - withNameAndCode: newWithNameAndCode(req), - } - - for _, v := range req.args { - v = strings.TrimSpace(v) - if v == "" { 
- return nil, errors.New("VOLUME specified can not be an empty string") - } - cmd.Volumes = append(cmd.Volumes, v) - } - return cmd, nil - -} - -func parseStopSignal(req parseRequest) (*StopSignalCommand, error) { - if len(req.args) != 1 { - return nil, errExactlyOneArgument("STOPSIGNAL") - } - sig := req.args[0] - - cmd := &StopSignalCommand{ - Signal: sig, - withNameAndCode: newWithNameAndCode(req), - } - return cmd, nil - -} - -func parseArg(req parseRequest) (*ArgCommand, error) { - if len(req.args) != 1 { - return nil, errExactlyOneArgument("ARG") - } - - kvpo := KeyValuePairOptional{} - - arg := req.args[0] - // 'arg' can just be a name or name-value pair. Note that this is different - // from 'env' that handles the split of name and value at the parser level. - // The reason for doing it differently for 'arg' is that we support just - // defining an arg and not assign it a value (while 'env' always expects a - // name-value pair). If possible, it will be good to harmonize the two. - if strings.Contains(arg, "=") { - parts := strings.SplitN(arg, "=", 2) - if len(parts[0]) == 0 { - return nil, errBlankCommandNames("ARG") - } - - kvpo.Key = parts[0] - kvpo.Value = &parts[1] - } else { - kvpo.Key = arg - } - - return &ArgCommand{ - KeyValuePairOptional: kvpo, - withNameAndCode: newWithNameAndCode(req), - }, nil -} - -func parseShell(req parseRequest) (*ShellCommand, error) { - if err := req.flags.Parse(); err != nil { - return nil, err - } - shellSlice := handleJSONArgs(req.args, req.attributes) - switch { - case len(shellSlice) == 0: - // SHELL [] - return nil, errAtLeastOneArgument("SHELL") - case req.attributes["json"]: - // SHELL ["powershell", "-command"] - - return &ShellCommand{ - Shell: strslice.StrSlice(shellSlice), - withNameAndCode: newWithNameAndCode(req), - }, nil - default: - // SHELL powershell -command - not JSON - return nil, errNotJSON("SHELL", req.original) - } -} - -func errAtLeastOneArgument(command string) error { - return errors.Errorf("%s requires at least one argument", command) -} - -func errExactlyOneArgument(command string) error { - return errors.Errorf("%s requires exactly one argument", command) -} - -func errNoDestinationArgument(command string) error { - return errors.Errorf("%s requires at least two arguments, but only one was provided. 
Destination could not be determined.", command) -} - -func errBlankCommandNames(command string) error { - return errors.Errorf("%s names can not be blank", command) -} - -func errTooManyArguments(command string) error { - return errors.Errorf("Bad input to %s, too many arguments", command) -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support.go deleted file mode 100644 index beefe775..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support.go +++ /dev/null @@ -1,19 +0,0 @@ -package instructions - -import "strings" - -// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile -// for exec form it returns untouched args slice -// for shell form it returns concatenated args as the first element of a slice -func handleJSONArgs(args []string, attributes map[string]bool) []string { - if len(args) == 0 { - return []string{} - } - - if attributes != nil && attributes["json"] { - return args - } - - // literal string command, not an exec array - return []string{strings.Join(args, " ")} -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go deleted file mode 100644 index 15f00ce7..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go +++ /dev/null @@ -1,368 +0,0 @@ -package parser - -// line parsers are dispatch calls that parse a single unit of text into a -// Node object which contains the whole statement. Dockerfiles have varied -// (but not usually unique, see ONBUILD for a unique example) parsing rules -// per-command, and these unify the processing in a way that makes it -// manageable. - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -var ( - errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only") -) - -const ( - commandLabel = "LABEL" -) - -// ignore the current argument. This will still leave a command parsed, but -// will not incorporate the arguments into the ast. -func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) { - return &Node{}, nil, nil -} - -// used for onbuild. Could potentially be used for anything that represents a -// statement with sub-statements. -// -// ONBUILD RUN foo bar -> (onbuild (run foo bar)) -// -func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - child, err := newNodeFromLine(rest, d) - if err != nil { - return nil, nil, err - } - - return &Node{Children: []*Node{child}}, nil, nil -} - -// helper to parse words (i.e space delimited or quoted strings) in a statement. -// The quotes are preserved as part of this function and they are stripped later -// as part of processWords(). 
-func parseWords(rest string, d *Directive) []string { - const ( - inSpaces = iota // looking for start of a word - inWord - inQuote - ) - - words := []string{} - phase := inSpaces - word := "" - quote := '\000' - blankOK := false - var ch rune - var chWidth int - - for pos := 0; pos <= len(rest); pos += chWidth { - if pos != len(rest) { - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - - if phase == inSpaces { // Looking for start of word - if pos == len(rest) { // end of input - break - } - if unicode.IsSpace(ch) { // skip spaces - continue - } - phase = inWord // found it, fall through - } - if (phase == inWord || phase == inQuote) && (pos == len(rest)) { - if blankOK || len(word) > 0 { - words = append(words, word) - } - break - } - if phase == inWord { - if unicode.IsSpace(ch) { - phase = inSpaces - if blankOK || len(word) > 0 { - words = append(words, word) - } - word = "" - blankOK = false - continue - } - if ch == '\'' || ch == '"' { - quote = ch - blankOK = true - phase = inQuote - } - if ch == d.escapeToken { - if pos+chWidth == len(rest) { - continue // just skip an escape token at end of line - } - // If we're not quoted and we see an escape token, then always just - // add the escape token plus the char to the word, even if the char - // is a quote. - word += string(ch) - pos += chWidth - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - word += string(ch) - continue - } - if phase == inQuote { - if ch == quote { - phase = inWord - } - // The escape token is special except for ' quotes - can't escape anything for ' - if ch == d.escapeToken && quote != '\'' { - if pos+chWidth == len(rest) { - phase = inWord - continue // just skip the escape token at end - } - pos += chWidth - word += string(ch) - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - word += string(ch) - } - } - - return words -} - -// parse environment like statements. Note that this does *not* handle -// variable interpolation, which will be handled in the evaluator. -func parseNameVal(rest string, key string, d *Directive) (*Node, error) { - // This is kind of tricky because we need to support the old - // variant: KEY name value - // as well as the new one: KEY name=value ... - // The trigger to know which one is being used will be whether we hit - // a space or = first. space ==> old, "=" ==> new - - words := parseWords(rest, d) - if len(words) == 0 { - return nil, nil - } - - // Old format (KEY name value) - if !strings.Contains(words[0], "=") { - parts := tokenWhitespace.Split(rest, 2) - if len(parts) < 2 { - return nil, fmt.Errorf(key + " must have two arguments") - } - return newKeyValueNode(parts[0], parts[1]), nil - } - - var rootNode *Node - var prevNode *Node - for _, word := range words { - if !strings.Contains(word, "=") { - return nil, fmt.Errorf("Syntax error - can't find = in %q. 
Must be of the form: name=value", word) - } - - parts := strings.SplitN(word, "=", 2) - node := newKeyValueNode(parts[0], parts[1]) - rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode) - } - - return rootNode, nil -} - -func newKeyValueNode(key, value string) *Node { - return &Node{ - Value: key, - Next: &Node{Value: value}, - } -} - -func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) { - if rootNode == nil { - rootNode = node - } - if prevNode != nil { - prevNode.Next = node - } - - prevNode = node.Next - return rootNode, prevNode -} - -func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) { - node, err := parseNameVal(rest, "ENV", d) - return node, nil, err -} - -func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) { - node, err := parseNameVal(rest, commandLabel, d) - return node, nil, err -} - -// parses a statement containing one or more keyword definition(s) and/or -// value assignments, like `name1 name2= name3="" name4=value`. -// Note that this is a stricter format than the old format of assignment, -// allowed by parseNameVal(), in a way that this only allows assignment of the -// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. -// In addition, a keyword definition alone is of the form `keyword` like `name1` -// above. And the assignments `name2=` and `name3=""` are equivalent and -// assign an empty value to the respective keywords. -func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) { - words := parseWords(rest, d) - if len(words) == 0 { - return nil, nil, nil - } - - var ( - rootnode *Node - prevNode *Node - ) - for i, word := range words { - node := &Node{} - node.Value = word - if i == 0 { - rootnode = node - } else { - prevNode.Next = node - } - prevNode = node - } - - return rootnode, nil, nil -} - -// parses a whitespace-delimited set of arguments. The result is effectively a -// linked list of string arguments. -func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - node := &Node{} - rootnode := node - prevnode := node - for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp - prevnode = node - node.Value = str - node.Next = &Node{} - node = node.Next - } - - // XXX to get around regexp.Split *always* providing an empty string at the - // end due to how our loop is constructed, nil out the last node in the - // chain. - prevnode.Next = nil - - return rootnode, nil, nil -} - -// parseString just wraps the string in quotes and returns a working node. -func parseString(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - n := &Node{} - n.Value = rest - return n, nil, nil -} - -// parseJSON converts JSON arrays to an AST. 
-func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) { - rest = strings.TrimLeftFunc(rest, unicode.IsSpace) - if !strings.HasPrefix(rest, "[") { - return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) - } - - var myJSON []interface{} - if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { - return nil, nil, err - } - - var top, prev *Node - for _, str := range myJSON { - s, ok := str.(string) - if !ok { - return nil, nil, errDockerfileNotStringArray - } - - node := &Node{Value: s} - if prev == nil { - top = node - } else { - prev.Next = node - } - prev = node - } - - return top, map[string]bool{"json": true}, nil -} - -// parseMaybeJSON determines if the argument appears to be a JSON array. If -// so, passes to parseJSON; if not, quotes the result and returns a single -// node. -func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - node, attrs, err := parseJSON(rest, d) - - if err == nil { - return node, attrs, nil - } - if err == errDockerfileNotStringArray { - return nil, nil, err - } - - node = &Node{} - node.Value = rest - return node, nil, nil -} - -// parseMaybeJSONToList determines if the argument appears to be a JSON array. If -// so, passes to parseJSON; if not, attempts to parse it as a whitespace -// delimited string. -func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) { - node, attrs, err := parseJSON(rest, d) - - if err == nil { - return node, attrs, nil - } - if err == errDockerfileNotStringArray { - return nil, nil, err - } - - return parseStringsWhitespaceDelimited(rest, d) -} - -// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument. -func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) { - // Find end of first argument - var sep int - for ; sep < len(rest); sep++ { - if unicode.IsSpace(rune(rest[sep])) { - break - } - } - next := sep - for ; next < len(rest); next++ { - if !unicode.IsSpace(rune(rest[next])) { - break - } - } - - if sep == 0 { - return nil, nil, nil - } - - typ := rest[:sep] - cmd, attrs, err := parseMaybeJSON(rest[next:], d) - if err != nil { - return nil, nil, err - } - - return &Node{Value: typ, Next: cmd}, attrs, err -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go deleted file mode 100644 index e9268abb..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go +++ /dev/null @@ -1,332 +0,0 @@ -// Package parser implements a parser and parse tree dumper for Dockerfiles. -package parser - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strconv" - "strings" - "unicode" - - "github.com/moby/buildkit/frontend/dockerfile/command" - "github.com/pkg/errors" -) - -// Node is a structure used to represent a parse tree. -// -// In the node there are three fields, Value, Next, and Children. Value is the -// current token's string value. Next is always the next non-child token, and -// children contains all the children. Here's an example: -// -// (value next (child child-next child-next-next) next-next) -// -// This data structure is frankly pretty lousy for handling complex languages, -// but lucky for us the Dockerfile isn't very complicated. This structure -// works a little more effectively than a "proper" parse tree for our needs. 
-// -type Node struct { - Value string // actual content - Next *Node // the next item in the current sexp - Children []*Node // the children of this sexp - Attributes map[string]bool // special attributes for this node - Original string // original line used before parsing - Flags []string // only top Node should have this set - StartLine int // the line in the original dockerfile where the node begins - EndLine int // the line in the original dockerfile where the node ends -} - -// Dump dumps the AST defined by `node` as a list of sexps. -// Returns a string suitable for printing. -func (node *Node) Dump() string { - str := "" - str += node.Value - - if len(node.Flags) > 0 { - str += fmt.Sprintf(" %q", node.Flags) - } - - for _, n := range node.Children { - str += "(" + n.Dump() + ")\n" - } - - for n := node.Next; n != nil; n = n.Next { - if len(n.Children) > 0 { - str += " " + n.Dump() - } else { - str += " " + strconv.Quote(n.Value) - } - } - - return strings.TrimSpace(str) -} - -func (node *Node) lines(start, end int) { - node.StartLine = start - node.EndLine = end -} - -// AddChild adds a new child node, and updates line information -func (node *Node) AddChild(child *Node, startLine, endLine int) { - child.lines(startLine, endLine) - if node.StartLine < 0 { - node.StartLine = startLine - } - node.EndLine = endLine - node.Children = append(node.Children, child) -} - -var ( - dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error) - tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) - tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P.).*$`) - tokenComment = regexp.MustCompile(`^#.*$`) -) - -// DefaultEscapeToken is the default escape token -const DefaultEscapeToken = '\\' - -// Directive is the structure used during a build run to hold the state of -// parsing directives. -type Directive struct { - escapeToken rune // Current escape token - lineContinuationRegex *regexp.Regexp // Current line continuation regex - processingComplete bool // Whether we are done looking for directives - escapeSeen bool // Whether the escape directive has been seen -} - -// setEscapeToken sets the default token for escaping characters in a Dockerfile. -func (d *Directive) setEscapeToken(s string) error { - if s != "`" && s != "\\" { - return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s) - } - d.escapeToken = rune(s[0]) - d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`) - return nil -} - -// possibleParserDirective looks for parser directives, eg '# escapeToken='. -// Parser directives must precede any builder instruction or other comments, -// and cannot be repeated. -func (d *Directive) possibleParserDirective(line string) error { - if d.processingComplete { - return nil - } - - tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line)) - if len(tecMatch) != 0 { - for i, n := range tokenEscapeCommand.SubexpNames() { - if n == "escapechar" { - if d.escapeSeen { - return errors.New("only one escape parser directive can be used") - } - d.escapeSeen = true - return d.setEscapeToken(tecMatch[i]) - } - } - } - - d.processingComplete = true - return nil -} - -// NewDefaultDirective returns a new Directive with the default escapeToken token -func NewDefaultDirective() *Directive { - directive := Directive{} - directive.setEscapeToken(string(DefaultEscapeToken)) - return &directive -} - -func init() { - // Dispatch Table. see line_parsers.go for the parse functions. - // The command is parsed and mapped to the line parser. 
The line parser - // receives the arguments but not the command, and returns an AST after - // reformulating the arguments according to the rules in the parser - // functions. Errors are propagated up by Parse() and the resulting AST can - // be incorporated directly into the existing AST as a next. - dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){ - command.Add: parseMaybeJSONToList, - command.Arg: parseNameOrNameVal, - command.Cmd: parseMaybeJSON, - command.Copy: parseMaybeJSONToList, - command.Entrypoint: parseMaybeJSON, - command.Env: parseEnv, - command.Expose: parseStringsWhitespaceDelimited, - command.From: parseStringsWhitespaceDelimited, - command.Healthcheck: parseHealthConfig, - command.Label: parseLabel, - command.Maintainer: parseString, - command.Onbuild: parseSubCommand, - command.Run: parseMaybeJSON, - command.Shell: parseMaybeJSON, - command.StopSignal: parseString, - command.User: parseString, - command.Volume: parseMaybeJSONToList, - command.Workdir: parseString, - } -} - -// newNodeFromLine splits the line into parts, and dispatches to a function -// based on the command and command arguments. A Node is created from the -// result of the dispatch. -func newNodeFromLine(line string, directive *Directive) (*Node, error) { - cmd, flags, args, err := splitCommand(line) - if err != nil { - return nil, err - } - - fn := dispatch[cmd] - // Ignore invalid Dockerfile instructions - if fn == nil { - fn = parseIgnore - } - next, attrs, err := fn(args, directive) - if err != nil { - return nil, err - } - - return &Node{ - Value: cmd, - Original: line, - Flags: flags, - Next: next, - Attributes: attrs, - }, nil -} - -// Result is the result of parsing a Dockerfile -type Result struct { - AST *Node - EscapeToken rune - Warnings []string -} - -// PrintWarnings to the writer -func (r *Result) PrintWarnings(out io.Writer) { - if len(r.Warnings) == 0 { - return - } - fmt.Fprintf(out, strings.Join(r.Warnings, "\n")+"\n") -} - -// Parse reads lines from a Reader, parses the lines into an AST and returns -// the AST and escape token -func Parse(rwc io.Reader) (*Result, error) { - d := NewDefaultDirective() - currentLine := 0 - root := &Node{StartLine: -1} - scanner := bufio.NewScanner(rwc) - warnings := []string{} - - var err error - for scanner.Scan() { - bytesRead := scanner.Bytes() - if currentLine == 0 { - // First line, strip the byte-order-marker if present - bytesRead = bytes.TrimPrefix(bytesRead, utf8bom) - } - bytesRead, err = processLine(d, bytesRead, true) - if err != nil { - return nil, err - } - currentLine++ - - startLine := currentLine - line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d) - if isEndOfLine && line == "" { - continue - } - - var hasEmptyContinuationLine bool - for !isEndOfLine && scanner.Scan() { - bytesRead, err := processLine(d, scanner.Bytes(), false) - if err != nil { - return nil, err - } - currentLine++ - - if isComment(scanner.Bytes()) { - // original line was a comment (processLine strips comments) - continue - } - if isEmptyContinuationLine(bytesRead) { - hasEmptyContinuationLine = true - continue - } - - continuationLine := string(bytesRead) - continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d) - line += continuationLine - } - - if hasEmptyContinuationLine { - warnings = append(warnings, "[WARNING]: Empty continuation line found in:\n "+line) - } - - child, err := newNodeFromLine(line, d) - if err != nil { - return nil, err - } - root.AddChild(child, startLine, currentLine) - 
} - - if len(warnings) > 0 { - warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.") - } - - if root.StartLine < 0 { - return nil, errors.New("file with no instructions.") - } - - return &Result{ - AST: root, - Warnings: warnings, - EscapeToken: d.escapeToken, - }, handleScannerError(scanner.Err()) -} - -func trimComments(src []byte) []byte { - return tokenComment.ReplaceAll(src, []byte{}) -} - -func trimWhitespace(src []byte) []byte { - return bytes.TrimLeftFunc(src, unicode.IsSpace) -} - -func isComment(line []byte) bool { - return tokenComment.Match(trimWhitespace(line)) -} - -func isEmptyContinuationLine(line []byte) bool { - return len(trimWhitespace(line)) == 0 -} - -var utf8bom = []byte{0xEF, 0xBB, 0xBF} - -func trimContinuationCharacter(line string, d *Directive) (string, bool) { - if d.lineContinuationRegex.MatchString(line) { - line = d.lineContinuationRegex.ReplaceAllString(line, "") - return line, false - } - return line, true -} - -// TODO: remove stripLeftWhitespace after deprecation period. It seems silly -// to preserve whitespace on continuation lines. Why is that done? -func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) { - if stripLeftWhitespace { - token = trimWhitespace(token) - } - return trimComments(token), d.possibleParserDirective(string(token)) -} - -func handleScannerError(err error) error { - switch err { - case bufio.ErrTooLong: - return errors.Errorf("dockerfile line greater than max allowed size of %d", bufio.MaxScanTokenSize-1) - default: - return err - } -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go deleted file mode 100644 index 171f454f..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go +++ /dev/null @@ -1,118 +0,0 @@ -package parser - -import ( - "strings" - "unicode" -) - -// splitCommand takes a single line of text and parses out the cmd and args, -// which are used for dispatching to more exact parsing functions. 
-func splitCommand(line string) (string, []string, string, error) { - var args string - var flags []string - - // Make sure we get the same results irrespective of leading/trailing spaces - cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) - cmd := strings.ToLower(cmdline[0]) - - if len(cmdline) == 2 { - var err error - args, flags, err = extractBuilderFlags(cmdline[1]) - if err != nil { - return "", nil, "", err - } - } - - return cmd, flags, strings.TrimSpace(args), nil -} - -func extractBuilderFlags(line string) (string, []string, error) { - // Parses the BuilderFlags and returns the remaining part of the line - - const ( - inSpaces = iota // looking for start of a word - inWord - inQuote - ) - - words := []string{} - phase := inSpaces - word := "" - quote := '\000' - blankOK := false - var ch rune - - for pos := 0; pos <= len(line); pos++ { - if pos != len(line) { - ch = rune(line[pos]) - } - - if phase == inSpaces { // Looking for start of word - if pos == len(line) { // end of input - break - } - if unicode.IsSpace(ch) { // skip spaces - continue - } - - // Only keep going if the next word starts with -- - if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { - return line[pos:], words, nil - } - - phase = inWord // found something with "--", fall through - } - if (phase == inWord || phase == inQuote) && (pos == len(line)) { - if word != "--" && (blankOK || len(word) > 0) { - words = append(words, word) - } - break - } - if phase == inWord { - if unicode.IsSpace(ch) { - phase = inSpaces - if word == "--" { - return line[pos:], words, nil - } - if blankOK || len(word) > 0 { - words = append(words, word) - } - word = "" - blankOK = false - continue - } - if ch == '\'' || ch == '"' { - quote = ch - blankOK = true - phase = inQuote - continue - } - if ch == '\\' { - if pos+1 == len(line) { - continue // just skip \ at end - } - pos++ - ch = rune(line[pos]) - } - word += string(ch) - continue - } - if phase == inQuote { - if ch == quote { - phase = inWord - continue - } - if ch == '\\' { - if pos+1 == len(line) { - phase = inWord - continue // just skip \ at end - } - pos++ - ch = rune(line[pos]) - } - word += string(ch) - } - } - - return "", words, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest deleted file mode 100644 index 38534b0c..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest +++ /dev/null @@ -1,238 +0,0 @@ -A|hello | hello -A|he'll'o | hello -A|he'llo | error -A|he\'llo | he'llo -A|he\\'llo | error -A|abc\tdef | abctdef -A|"abc\tdef" | abc\tdef -A|"abc\\tdef" | abc\tdef -A|'abc\tdef' | abc\tdef -A|hello\ | hello -A|hello\\ | hello\ -A|"hello | error -A|"hello\" | error -A|"hel'lo" | hel'lo -A|'hello | error -A|'hello\' | hello\ -A|'hello\there' | hello\there -A|'hello\\there' | hello\\there -A|"''" | '' -A|$. | $. -A|he$1x | hex -A|he$.x | he$.x -# Next one is different on Windows as $pwd==$PWD -U|he$pwd. | he. -W|he$pwd. | he/home. 
-A|he$PWD | he/home -A|he\$PWD | he$PWD -A|he\\$PWD | he\/home -A|"he\$PWD" | he$PWD -A|"he\\$PWD" | he\/home -A|\${} | ${} -A|\${}aaa | ${}aaa -A|he\${} | he${} -A|he\${}xx | he${}xx -A|${} | error -A|${}aaa | error -A|he${} | error -A|he${}xx | error -A|he${hi} | he -A|he${hi}xx | hexx -A|he${PWD} | he/home -A|he${.} | error -A|he${XXX:-000}xx | he000xx -A|he${PWD:-000}xx | he/homexx -A|he${XXX:-$PWD}xx | he/homexx -A|he${XXX:-${PWD:-yyy}}xx | he/homexx -A|he${XXX:-${YYY:-yyy}}xx | heyyyxx -A|he${XXX:YYY} | error -A|he${XXX?} | error -A|he${XXX:?} | error -A|he${PWD?} | he/home -A|he${PWD:?} | he/home -A|he${NULL?} | he -A|he${NULL:?} | error -A|he${XXX:+${PWD}}xx | hexx -A|he${PWD:+${XXX}}xx | hexx -A|he${PWD:+${SHELL}}xx | hebashxx -A|he${XXX:+000}xx | hexx -A|he${PWD:+000}xx | he000xx -A|'he${XX}' | he${XX} -A|"he${PWD}" | he/home -A|"he'$PWD'" | he'/home' -A|"$PWD" | /home -A|'$PWD' | $PWD -A|'\$PWD' | \$PWD -A|'"hello"' | "hello" -A|he\$PWD | he$PWD -A|"he\$PWD" | he$PWD -A|'he\$PWD' | he\$PWD -A|he${PWD | error -A|he${PWD:=000}xx | error -A|he${PWD:+${PWD}:}xx | he/home:xx -A|he${XXX:-\$PWD:}xx | he$PWD:xx -A|he${XXX:-\${PWD}z}xx | he${PWDz}xx -A|안녕하세요 | 안녕하세요 -A|안'녕'하세요 | 안녕하세요 -A|안'녕하세요 | error -A|안녕\'하세요 | 안녕'하세요 -A|안\\'녕하세요 | error -A|안녕\t하세요 | 안녕t하세요 -A|"안녕\t하세요" | 안녕\t하세요 -A|'안녕\t하세요 | error -A|안녕하세요\ | 안녕하세요 -A|안녕하세요\\ | 안녕하세요\ -A|"안녕하세요 | error -A|"안녕하세요\" | error -A|"안녕'하세요" | 안녕'하세요 -A|'안녕하세요 | error -A|'안녕하세요\' | 안녕하세요\ -A|안녕$1x | 안녕x -A|안녕$.x | 안녕$.x -# Next one is different on Windows as $pwd==$PWD -U|안녕$pwd. | 안녕. -W|안녕$pwd. | 안녕/home. -A|안녕$PWD | 안녕/home -A|안녕\$PWD | 안녕$PWD -A|안녕\\$PWD | 안녕\/home -A|안녕\${} | 안녕${} -A|안녕\${}xx | 안녕${}xx -A|안녕${} | error -A|안녕${}xx | error -A|안녕${hi} | 안녕 -A|안녕${hi}xx | 안녕xx -A|안녕${PWD} | 안녕/home -A|안녕${.} | error -A|안녕${XXX:-000}xx | 안녕000xx -A|안녕${PWD:-000}xx | 안녕/homexx -A|안녕${XXX:-$PWD}xx | 안녕/homexx -A|안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx -A|안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx -A|안녕${XXX:YYY} | error -A|안녕${XXX:+${PWD}}xx | 안녕xx -A|안녕${PWD:+${XXX}}xx | 안녕xx -A|안녕${PWD:+${SHELL}}xx | 안녕bashxx -A|안녕${XXX:+000}xx | 안녕xx -A|안녕${PWD:+000}xx | 안녕000xx -A|'안녕${XX}' | 안녕${XX} -A|"안녕${PWD}" | 안녕/home -A|"안녕'$PWD'" | 안녕'/home' -A|'"안녕"' | "안녕" -A|안녕\$PWD | 안녕$PWD -A|"안녕\$PWD" | 안녕$PWD -A|'안녕\$PWD' | 안녕\$PWD -A|안녕${PWD | error -A|안녕${PWD:=000}xx | error -A|안녕${PWD:+${PWD}:}xx | 안녕/home:xx -A|안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx -A|안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx -A|$KOREAN | 한국어 -A|안녕$KOREAN | 안녕한국어 -A|${{aaa} | error -A|${aaa}} | } -A|${aaa | error -A|${{aaa:-bbb} | error -A|${aaa:-bbb}} | bbb} -A|${aaa:-bbb | error -A|${aaa:-bbb} | bbb -A|${aaa:-${bbb:-ccc}} | ccc -A|${aaa:-bbb ${foo} | error -A|${aaa:-bbb {foo} | bbb {foo -A|${:} | error -A|${:-bbb} | error -A|${:+bbb} | error - -# Positional parameters won't be set: -# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_01 -A|$1 | -A|${1} | -A|${1:+bbb} | -A|${1:-bbb} | bbb -A|$2 | -A|${2} | -A|${2:+bbb} | -A|${2:-bbb} | bbb -A|$3 | -A|${3} | -A|${3:+bbb} | -A|${3:-bbb} | bbb -A|$4 | -A|${4} | -A|${4:+bbb} | -A|${4:-bbb} | bbb -A|$5 | -A|${5} | -A|${5:+bbb} | -A|${5:-bbb} | bbb -A|$6 | -A|${6} | -A|${6:+bbb} | -A|${6:-bbb} | bbb -A|$7 | -A|${7} | -A|${7:+bbb} | -A|${7:-bbb} | bbb -A|$8 | -A|${8} | -A|${8:+bbb} | -A|${8:-bbb} | bbb -A|$9 | -A|${9} | -A|${9:+bbb} | -A|${9:-bbb} | bbb -A|$999 | -A|${999} | -A|${999:+bbb} | -A|${999:-bbb} | bbb -A|$999aaa | aaa -A|${999}aaa | aaa -A|${999:+bbb}aaa | aaa -A|${999:-bbb}aaa | bbbaaa -A|$001 | -A|${001} | -A|${001:+bbb} 
| -A|${001:-bbb} | bbb -A|$001aaa | aaa -A|${001}aaa | aaa -A|${001:+bbb}aaa | aaa -A|${001:-bbb}aaa | bbbaaa - -# Special parameters won't be set in the Dockerfile: -# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02 -A|$@ | -A|${@} | -A|${@:+bbb} | -A|${@:-bbb} | bbb -A|$@@@ | @@ -A|$@aaa | aaa -A|${@}aaa | aaa -A|${@:+bbb}aaa | aaa -A|${@:-bbb}aaa | bbbaaa -A|$* | -A|${*} | -A|${*:+bbb} | -A|${*:-bbb} | bbb -A|$# | -A|${#} | -A|${#:+bbb} | -A|${#:-bbb} | bbb -A|$? | -A|${?} | -A|${?:+bbb} | -A|${?:-bbb} | bbb -A|$- | -A|${-} | -A|${-:+bbb} | -A|${-:-bbb} | bbb -A|$$ | -A|${$} | -A|${$:+bbb} | -A|${$:-bbb} | bbb -A|$! | -A|${!} | -A|${!:+bbb} | -A|${!:-bbb} | bbb -A|$0 | -A|${0} | -A|${0:+bbb} | -A|${0:-bbb} | bbb diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go deleted file mode 100644 index 36903ec5..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package shell - -// EqualEnvKeys compare two strings and returns true if they are equal. -// On Unix this comparison is case sensitive. -// On Windows this comparison is case insensitive. -func EqualEnvKeys(from, to string) bool { - return from == to -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go deleted file mode 100644 index 010569bb..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -package shell - -import "strings" - -// EqualEnvKeys compare two strings and returns true if they are equal. -// On Unix this comparison is case sensitive. -// On Windows this comparison is case insensitive. -func EqualEnvKeys(from, to string) bool { - return strings.ToUpper(from) == strings.ToUpper(to) -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go deleted file mode 100644 index d65913ff..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go +++ /dev/null @@ -1,466 +0,0 @@ -package shell - -import ( - "bytes" - "fmt" - "strings" - "text/scanner" - "unicode" - - "github.com/pkg/errors" -) - -// Lex performs shell word splitting and variable expansion. -// -// Lex takes a string and an array of env variables and -// process all quotes (" and ') as well as $xxx and ${xxx} env variable -// tokens. Tries to mimic bash shell process. -// It doesn't support all flavors of ${xx:...} formats but new ones can -// be added by adding code to the "special ${} format processing" section -type Lex struct { - escapeToken rune - RawQuotes bool - SkipUnsetEnv bool -} - -// NewLex creates a new Lex which uses escapeToken to escape quotes. -func NewLex(escapeToken rune) *Lex { - return &Lex{escapeToken: escapeToken} -} - -// ProcessWord will use the 'env' list of environment variables, -// and replace any env var references in 'word'. 
-func (s *Lex) ProcessWord(word string, env []string) (string, error) { - word, _, err := s.process(word, BuildEnvs(env)) - return word, err -} - -// ProcessWords will use the 'env' list of environment variables, -// and replace any env var references in 'word' then it will also -// return a slice of strings which represents the 'word' -// split up based on spaces - taking into account quotes. Note that -// this splitting is done **after** the env var substitutions are done. -// Note, each one is trimmed to remove leading and trailing spaces (unless -// they are quoted", but ProcessWord retains spaces between words. -func (s *Lex) ProcessWords(word string, env []string) ([]string, error) { - _, words, err := s.process(word, BuildEnvs(env)) - return words, err -} - -// ProcessWordWithMap will use the 'env' list of environment variables, -// and replace any env var references in 'word'. -func (s *Lex) ProcessWordWithMap(word string, env map[string]string) (string, error) { - word, _, err := s.process(word, env) - return word, err -} - -func (s *Lex) ProcessWordsWithMap(word string, env map[string]string) ([]string, error) { - _, words, err := s.process(word, env) - return words, err -} - -func (s *Lex) process(word string, env map[string]string) (string, []string, error) { - sw := &shellWord{ - envs: env, - escapeToken: s.escapeToken, - skipUnsetEnv: s.SkipUnsetEnv, - rawQuotes: s.RawQuotes, - } - sw.scanner.Init(strings.NewReader(word)) - return sw.process(word) -} - -type shellWord struct { - scanner scanner.Scanner - envs map[string]string - escapeToken rune - rawQuotes bool - skipUnsetEnv bool -} - -func (sw *shellWord) process(source string) (string, []string, error) { - word, words, err := sw.processStopOn(scanner.EOF) - if err != nil { - err = errors.Wrapf(err, "failed to process %q", source) - } - return word, words, err -} - -type wordsStruct struct { - word string - words []string - inWord bool -} - -func (w *wordsStruct) addChar(ch rune) { - if unicode.IsSpace(ch) && w.inWord { - if len(w.word) != 0 { - w.words = append(w.words, w.word) - w.word = "" - w.inWord = false - } - } else if !unicode.IsSpace(ch) { - w.addRawChar(ch) - } -} - -func (w *wordsStruct) addRawChar(ch rune) { - w.word += string(ch) - w.inWord = true -} - -func (w *wordsStruct) addString(str string) { - for _, ch := range str { - w.addChar(ch) - } -} - -func (w *wordsStruct) addRawString(str string) { - w.word += str - w.inWord = true -} - -func (w *wordsStruct) getWords() []string { - if len(w.word) > 0 { - w.words = append(w.words, w.word) - - // Just in case we're called again by mistake - w.word = "" - w.inWord = false - } - return w.words -} - -// Process the word, starting at 'pos', and stop when we get to the -// end of the word or the 'stopChar' character -func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { - var result bytes.Buffer - var words wordsStruct - - var charFuncMapping = map[rune]func() (string, error){ - '\'': sw.processSingleQuote, - '"': sw.processDoubleQuote, - '$': sw.processDollar, - } - - for sw.scanner.Peek() != scanner.EOF { - ch := sw.scanner.Peek() - - if stopChar != scanner.EOF && ch == stopChar { - sw.scanner.Next() - return result.String(), words.getWords(), nil - } - if fn, ok := charFuncMapping[ch]; ok { - // Call special processing func for certain chars - tmp, err := fn() - if err != nil { - return "", []string{}, err - } - result.WriteString(tmp) - - if ch == rune('$') { - words.addString(tmp) - } else { - words.addRawString(tmp) - } - } else { - // 
Not special, just add it to the result - ch = sw.scanner.Next() - - if ch == sw.escapeToken { - // '\' (default escape token, but ` allowed) escapes, except end of line - ch = sw.scanner.Next() - - if ch == scanner.EOF { - break - } - - words.addRawChar(ch) - } else { - words.addChar(ch) - } - - result.WriteRune(ch) - } - } - if stopChar != scanner.EOF { - return "", []string{}, errors.Errorf("unexpected end of statement while looking for matching %s", string(stopChar)) - } - return result.String(), words.getWords(), nil -} - -func (sw *shellWord) processSingleQuote() (string, error) { - // All chars between single quotes are taken as-is - // Note, you can't escape ' - // - // From the "sh" man page: - // Single Quotes - // Enclosing characters in single quotes preserves the literal meaning of - // all the characters (except single quotes, making it impossible to put - // single-quotes in a single-quoted string). - - var result bytes.Buffer - - ch := sw.scanner.Next() - if sw.rawQuotes { - result.WriteRune(ch) - } - - for { - ch = sw.scanner.Next() - switch ch { - case scanner.EOF: - return "", errors.New("unexpected end of statement while looking for matching single-quote") - case '\'': - if sw.rawQuotes { - result.WriteRune(ch) - } - return result.String(), nil - } - result.WriteRune(ch) - } -} - -func (sw *shellWord) processDoubleQuote() (string, error) { - // All chars up to the next " are taken as-is, even ', except any $ chars - // But you can escape " with a \ (or ` if escape token set accordingly) - // - // From the "sh" man page: - // Double Quotes - // Enclosing characters within double quotes preserves the literal meaning - // of all characters except dollarsign ($), backquote (`), and backslash - // (\). The backslash inside double quotes is historically weird, and - // serves to quote only the following characters: - // $ ` " \ . - // Otherwise it remains literal. - - var result bytes.Buffer - - ch := sw.scanner.Next() - if sw.rawQuotes { - result.WriteRune(ch) - } - - for { - switch sw.scanner.Peek() { - case scanner.EOF: - return "", errors.New("unexpected end of statement while looking for matching double-quote") - case '"': - ch := sw.scanner.Next() - if sw.rawQuotes { - result.WriteRune(ch) - } - return result.String(), nil - case '$': - value, err := sw.processDollar() - if err != nil { - return "", err - } - result.WriteString(value) - default: - ch := sw.scanner.Next() - if ch == sw.escapeToken { - switch sw.scanner.Peek() { - case scanner.EOF: - // Ignore \ at end of word - continue - case '"', '$', sw.escapeToken: - // These chars can be escaped, all other \'s are left as-is - // Note: for now don't do anything special with ` chars. - // Not sure what to do with them anyway since we're not going - // to execute the text in there (not now anyway). - ch = sw.scanner.Next() - } - } - result.WriteRune(ch) - } - } -} - -func (sw *shellWord) processDollar() (string, error) { - sw.scanner.Next() - - // $xxx case - if sw.scanner.Peek() != '{' { - name := sw.processName() - if name == "" { - return "$", nil - } - value, found := sw.getEnv(name) - if !found && sw.skipUnsetEnv { - return "$" + name, nil - } - return value, nil - } - - sw.scanner.Next() - switch sw.scanner.Peek() { - case scanner.EOF: - return "", errors.New("syntax error: missing '}'") - case '{', '}', ':': - // Invalid ${{xx}, ${:xx}, ${:}. 
${} case - return "", errors.New("syntax error: bad substitution") - } - name := sw.processName() - ch := sw.scanner.Next() - switch ch { - case '}': - // Normal ${xx} case - value, found := sw.getEnv(name) - if !found && sw.skipUnsetEnv { - return fmt.Sprintf("${%s}", name), nil - } - return value, nil - case '?': - word, _, err := sw.processStopOn('}') - if err != nil { - if sw.scanner.Peek() == scanner.EOF { - return "", errors.New("syntax error: missing '}'") - } - return "", err - } - newValue, found := sw.getEnv(name) - if !found { - if sw.skipUnsetEnv { - return fmt.Sprintf("${%s?%s}", name, word), nil - } - message := "is not allowed to be unset" - if word != "" { - message = word - } - return "", errors.Errorf("%s: %s", name, message) - } - return newValue, nil - case ':': - // Special ${xx:...} format processing - // Yes it allows for recursive $'s in the ... spot - modifier := sw.scanner.Next() - - word, _, err := sw.processStopOn('}') - if err != nil { - if sw.scanner.Peek() == scanner.EOF { - return "", errors.New("syntax error: missing '}'") - } - return "", err - } - - // Grab the current value of the variable in question so we - // can use to to determine what to do based on the modifier - newValue, found := sw.getEnv(name) - - switch modifier { - case '+': - if newValue != "" { - newValue = word - } - if !found && sw.skipUnsetEnv { - return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil - } - return newValue, nil - - case '-': - if newValue == "" { - newValue = word - } - if !found && sw.skipUnsetEnv { - return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil - } - - return newValue, nil - - case '?': - if !found { - if sw.skipUnsetEnv { - return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil - } - message := "is not allowed to be unset" - if word != "" { - message = word - } - return "", errors.Errorf("%s: %s", name, message) - } - if newValue == "" { - message := "is not allowed to be empty" - if word != "" { - message = word - } - return "", errors.Errorf("%s: %s", name, message) - } - return newValue, nil - - default: - return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier) - } - } - return "", errors.Errorf("missing ':' in substitution") -} - -func (sw *shellWord) processName() string { - // Read in a name (alphanumeric or _) - // If it starts with a numeric then just return $# - var name bytes.Buffer - - for sw.scanner.Peek() != scanner.EOF { - ch := sw.scanner.Peek() - if name.Len() == 0 && unicode.IsDigit(ch) { - for sw.scanner.Peek() != scanner.EOF && unicode.IsDigit(sw.scanner.Peek()) { - // Keep reading until the first non-digit character, or EOF - ch = sw.scanner.Next() - name.WriteRune(ch) - } - return name.String() - } - if name.Len() == 0 && isSpecialParam(ch) { - ch = sw.scanner.Next() - return string(ch) - } - if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { - break - } - ch = sw.scanner.Next() - name.WriteRune(ch) - } - - return name.String() -} - -// isSpecialParam checks if the provided character is a special parameters, -// as defined in http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02 -func isSpecialParam(char rune) bool { - switch char { - case '@', '*', '#', '?', '-', '$', '!', '0': - // Special parameters - // http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02 - return true - } - return false -} - -func (sw *shellWord) getEnv(name string) (string, bool) { - for key, value := range sw.envs { - if 
EqualEnvKeys(name, key) { - return value, true - } - } - return "", false -} - -func BuildEnvs(env []string) map[string]string { - envs := map[string]string{} - - for _, e := range env { - i := strings.Index(e, "=") - - if i < 0 { - envs[e] = "" - } else { - k := e[:i] - v := e[i+1:] - - // overwrite value if key already exists - envs[k] = v - } - } - - return envs -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest deleted file mode 100644 index 1fd9f194..00000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest +++ /dev/null @@ -1,30 +0,0 @@ -hello | hello -hello${hi}bye | hellobye -ENV hi=hi -hello${hi}bye | hellohibye -ENV space=abc def -hello${space}bye | helloabc,defbye -hello"${space}"bye | helloabc defbye -hello "${space}"bye | hello,abc defbye -ENV leading= ab c -hello${leading}def | hello,ab,cdef -hello"${leading}" def | hello ab c,def -hello"${leading}" | hello ab c -hello${leading} | hello,ab,c -# next line MUST have 3 trailing spaces, don't erase them! -ENV trailing=ab c -hello${trailing} | helloab,c -hello${trailing}d | helloab,c,d -hello"${trailing}"d | helloab c d -# next line MUST have 3 trailing spaces, don't erase them! -hel"lo${trailing}" | helloab c -hello" there " | hello there -hello there | hello,there -hello\ there | hello there -hello" there | error -hello\" there | hello",there -hello"\\there" | hello\there -hello"\there" | hello\there -hello'\\there' | hello\\there -hello'\there' | hello\there -hello'$there' | hello$there diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go deleted file mode 100644 index 2fea6480..00000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go +++ /dev/null @@ -1,217 +0,0 @@ -package forwarder - -import ( - "context" - "sync" - - "github.com/moby/buildkit/cache" - cacheutil "github.com/moby/buildkit/cache/util" - clienttypes "github.com/moby/buildkit/client" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend" - "github.com/moby/buildkit/frontend/gateway/client" - gwpb "github.com/moby/buildkit/frontend/gateway/pb" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/solver" - opspb "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - "github.com/moby/buildkit/worker" - "github.com/pkg/errors" - fstypes "github.com/tonistiigi/fsutil/types" -) - -func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, workerInfos []clienttypes.WorkerInfo) (*bridgeClient, error) { - return &bridgeClient{ - opts: opts, - inputs: inputs, - FrontendLLBBridge: llbBridge, - sid: session.FromContext(ctx), - workerInfos: workerInfos, - final: map[*ref]struct{}{}, - }, nil -} - -type bridgeClient struct { - frontend.FrontendLLBBridge - mu sync.Mutex - opts map[string]string - inputs map[string]*opspb.Definition - final map[*ref]struct{} - sid string - exporterAttr map[string][]byte - refs []*ref - workerInfos []clienttypes.WorkerInfo -} - -func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*client.Result, error) { - res, err := c.FrontendLLBBridge.Solve(ctx, frontend.SolveRequest{ - Definition: req.Definition, - Frontend: req.Frontend, - FrontendOpt: req.FrontendOpt, - FrontendInputs: req.FrontendInputs, - CacheImports: 
req.CacheImports, - }) - if err != nil { - return nil, err - } - - cRes := &client.Result{} - c.mu.Lock() - for k, r := range res.Refs { - rr, err := newRef(r) - if err != nil { - return nil, err - } - c.refs = append(c.refs, rr) - cRes.AddRef(k, rr) - } - if r := res.Ref; r != nil { - rr, err := newRef(r) - if err != nil { - return nil, err - } - c.refs = append(c.refs, rr) - cRes.SetRef(rr) - } - c.mu.Unlock() - cRes.Metadata = res.Metadata - - return cRes, nil -} -func (c *bridgeClient) BuildOpts() client.BuildOpts { - workers := make([]client.WorkerInfo, 0, len(c.workerInfos)) - for _, w := range c.workerInfos { - workers = append(workers, client.WorkerInfo{ - ID: w.ID, - Labels: w.Labels, - Platforms: w.Platforms, - }) - } - - return client.BuildOpts{ - Opts: c.opts, - SessionID: c.sid, - Workers: workers, - Product: apicaps.ExportedProduct, - Caps: gwpb.Caps.CapSet(gwpb.Caps.All()), - LLBCaps: opspb.Caps.CapSet(opspb.Caps.All()), - } -} - -func (c *bridgeClient) Inputs(ctx context.Context) (map[string]llb.State, error) { - inputs := make(map[string]llb.State) - for key, def := range c.inputs { - defop, err := llb.NewDefinitionOp(def) - if err != nil { - return nil, err - } - inputs[key] = llb.NewState(defop) - } - return inputs, nil -} - -func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, error) { - if r == nil { - return nil, nil - } - - res := &frontend.Result{} - - if r.Refs != nil { - res.Refs = make(map[string]solver.ResultProxy, len(r.Refs)) - for k, r := range r.Refs { - rr, ok := r.(*ref) - if !ok { - return nil, errors.Errorf("invalid reference type for forward %T", r) - } - c.final[rr] = struct{}{} - res.Refs[k] = rr.ResultProxy - } - } - if r := r.Ref; r != nil { - rr, ok := r.(*ref) - if !ok { - return nil, errors.Errorf("invalid reference type for forward %T", r) - } - c.final[rr] = struct{}{} - res.Ref = rr.ResultProxy - } - res.Metadata = r.Metadata - - return res, nil -} - -func (c *bridgeClient) discard(err error) { - for _, r := range c.refs { - if r != nil { - if _, ok := c.final[r]; !ok || err != nil { - r.Release(context.TODO()) - } - } - } -} - -type ref struct { - solver.ResultProxy -} - -func newRef(r solver.ResultProxy) (*ref, error) { - return &ref{ResultProxy: r}, nil -} - -func (r *ref) ToState() (st llb.State, err error) { - defop, err := llb.NewDefinitionOp(r.Definition()) - if err != nil { - return st, err - } - return llb.NewState(defop), nil -} - -func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) { - ref, err := r.getImmutableRef(ctx) - if err != nil { - return nil, err - } - newReq := cacheutil.ReadRequest{ - Filename: req.Filename, - } - if r := req.Range; r != nil { - newReq.Range = &cacheutil.FileRange{ - Offset: r.Offset, - Length: r.Length, - } - } - return cacheutil.ReadFile(ctx, ref, newReq) -} - -func (r *ref) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstypes.Stat, error) { - ref, err := r.getImmutableRef(ctx) - if err != nil { - return nil, err - } - newReq := cacheutil.ReadDirRequest{ - Path: req.Path, - IncludePattern: req.IncludePattern, - } - return cacheutil.ReadDir(ctx, ref, newReq) -} - -func (r *ref) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.Stat, error) { - ref, err := r.getImmutableRef(ctx) - if err != nil { - return nil, err - } - return cacheutil.StatFile(ctx, ref, req.Path) -} - -func (r *ref) getImmutableRef(ctx context.Context) (cache.ImmutableRef, error) { - rr, err := r.ResultProxy.Result(ctx) - if err != nil { - return nil, 
err - } - ref, ok := rr.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid ref: %T", rr.Sys()) - } - return ref.ImmutableRef, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go deleted file mode 100644 index 48c946d4..00000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go +++ /dev/null @@ -1,39 +0,0 @@ -package forwarder - -import ( - "context" - - "github.com/moby/buildkit/frontend" - "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/solver/pb" -) - -func NewGatewayForwarder(w frontend.WorkerInfos, f client.BuildFunc) frontend.Frontend { - return &GatewayForwarder{ - workers: w, - f: f, - } -} - -type GatewayForwarder struct { - workers frontend.WorkerInfos - f client.BuildFunc -} - -func (gf *GatewayForwarder) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*pb.Definition) (retRes *frontend.Result, retErr error) { - c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts, inputs, gf.workers.WorkerInfos()) - if err != nil { - return nil, err - } - - defer func() { - c.discard(retErr) - }() - - res, err := gf.f(ctx, c) - if err != nil { - return nil, err - } - - return c.toFrontendResult(res) -} diff --git a/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go b/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go deleted file mode 100644 index 19755816..00000000 --- a/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go +++ /dev/null @@ -1,459 +0,0 @@ -package bboltcachestorage - -import ( - "bytes" - "encoding/json" - "fmt" - - "github.com/moby/buildkit/solver" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - bolt "go.etcd.io/bbolt" -) - -const ( - resultBucket = "_result" - linksBucket = "_links" - byResultBucket = "_byresult" - backlinksBucket = "_backlinks" -) - -type Store struct { - db *bolt.DB -} - -func NewStore(dbPath string) (*Store, error) { - db, err := bolt.Open(dbPath, 0600, nil) - if err != nil { - return nil, errors.Wrapf(err, "failed to open database file %s", dbPath) - } - if err := db.Update(func(tx *bolt.Tx) error { - for _, b := range []string{resultBucket, linksBucket, byResultBucket, backlinksBucket} { - if _, err := tx.CreateBucketIfNotExists([]byte(b)); err != nil { - return err - } - } - return nil - }); err != nil { - return nil, err - } - db.NoSync = true - return &Store{db: db}, nil -} - -func (s *Store) Exists(id string) bool { - exists := false - err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(linksBucket)).Bucket([]byte(id)) - exists = b != nil - return nil - }) - if err != nil { - return false - } - return exists -} - -func (s *Store) Walk(fn func(id string) error) error { - ids := make([]string, 0) - if err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(linksBucket)) - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if v == nil { - ids = append(ids, string(k)) - } - } - return nil - }); err != nil { - return err - } - for _, id := range ids { - if err := fn(id); err != nil { - return err - } - } - return nil -} - -func (s *Store) WalkResults(id string, fn func(solver.CacheResult) error) error { - var list []solver.CacheResult - if err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(resultBucket)) - if b == nil { - return nil - } - b = b.Bucket([]byte(id)) - if b 
== nil { - return nil - } - - return b.ForEach(func(k, v []byte) error { - var res solver.CacheResult - if err := json.Unmarshal(v, &res); err != nil { - return err - } - list = append(list, res) - return nil - }) - }); err != nil { - return err - } - for _, res := range list { - if err := fn(res); err != nil { - return err - } - } - return nil -} - -func (s *Store) Load(id string, resultID string) (solver.CacheResult, error) { - var res solver.CacheResult - if err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(resultBucket)) - if b == nil { - return errors.WithStack(solver.ErrNotFound) - } - b = b.Bucket([]byte(id)) - if b == nil { - return errors.WithStack(solver.ErrNotFound) - } - - v := b.Get([]byte(resultID)) - if v == nil { - return errors.WithStack(solver.ErrNotFound) - } - - return json.Unmarshal(v, &res) - }); err != nil { - return solver.CacheResult{}, err - } - return res, nil -} - -func (s *Store) AddResult(id string, res solver.CacheResult) error { - return s.db.Update(func(tx *bolt.Tx) error { - _, err := tx.Bucket([]byte(linksBucket)).CreateBucketIfNotExists([]byte(id)) - if err != nil { - return err - } - - b, err := tx.Bucket([]byte(resultBucket)).CreateBucketIfNotExists([]byte(id)) - if err != nil { - return err - } - dt, err := json.Marshal(res) - if err != nil { - return err - } - if err := b.Put([]byte(res.ID), dt); err != nil { - return err - } - b, err = tx.Bucket([]byte(byResultBucket)).CreateBucketIfNotExists([]byte(res.ID)) - if err != nil { - return err - } - if err := b.Put([]byte(id), []byte{}); err != nil { - return err - } - - return nil - }) -} - -func (s *Store) WalkIDsByResult(resultID string, fn func(string) error) error { - ids := map[string]struct{}{} - if err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(byResultBucket)) - if b == nil { - return nil - } - b = b.Bucket([]byte(resultID)) - if b == nil { - return nil - } - return b.ForEach(func(k, v []byte) error { - ids[string(k)] = struct{}{} - return nil - }) - }); err != nil { - return err - } - for id := range ids { - if err := fn(id); err != nil { - return err - } - } - return nil -} - -func (s *Store) Release(resultID string) error { - return s.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(byResultBucket)) - if b == nil { - return errors.WithStack(solver.ErrNotFound) - } - b = b.Bucket([]byte(resultID)) - if b == nil { - return errors.WithStack(solver.ErrNotFound) - } - if err := b.ForEach(func(k, v []byte) error { - return s.releaseHelper(tx, string(k), resultID) - }); err != nil { - return err - } - return nil - }) -} - -func (s *Store) releaseHelper(tx *bolt.Tx, id, resultID string) error { - results := tx.Bucket([]byte(resultBucket)).Bucket([]byte(id)) - if results == nil { - return nil - } - - if err := results.Delete([]byte(resultID)); err != nil { - return err - } - - ids := tx.Bucket([]byte(byResultBucket)) - - ids = ids.Bucket([]byte(resultID)) - if ids == nil { - return nil - } - - if err := ids.Delete([]byte(id)); err != nil { - return err - } - - if isEmptyBucket(ids) { - if err := tx.Bucket([]byte(byResultBucket)).DeleteBucket([]byte(resultID)); err != nil { - return err - } - } - - links := tx.Bucket([]byte(resultBucket)) - if results == nil { - return nil - } - links = links.Bucket([]byte(id)) - - return s.emptyBranchWithParents(tx, []byte(id)) -} - -func (s *Store) emptyBranchWithParents(tx *bolt.Tx, id []byte) error { - results := tx.Bucket([]byte(resultBucket)).Bucket(id) - if results == nil { - return nil - } - - isEmptyLinks := true 
- links := tx.Bucket([]byte(linksBucket)).Bucket(id) - if links != nil { - isEmptyLinks = isEmptyBucket(links) - } - - if !isEmptyBucket(results) || !isEmptyLinks { - return nil - } - - if backlinks := tx.Bucket([]byte(backlinksBucket)).Bucket(id); backlinks != nil { - if err := backlinks.ForEach(func(k, v []byte) error { - if subLinks := tx.Bucket([]byte(linksBucket)).Bucket(k); subLinks != nil { - if err := subLinks.ForEach(func(k, v []byte) error { - parts := bytes.Split(k, []byte("@")) - if len(parts) != 2 { - return errors.Errorf("invalid key %s", k) - } - if bytes.Equal(id, parts[1]) { - return subLinks.Delete(k) - } - return nil - }); err != nil { - return err - } - - if isEmptyBucket(subLinks) { - if err := tx.Bucket([]byte(linksBucket)).DeleteBucket(k); err != nil { - return err - } - } - } - return s.emptyBranchWithParents(tx, k) - }); err != nil { - return err - } - if err := tx.Bucket([]byte(backlinksBucket)).DeleteBucket(id); err != nil { - return err - } - } - - // intentionally ignoring errors - tx.Bucket([]byte(linksBucket)).DeleteBucket([]byte(id)) - tx.Bucket([]byte(resultBucket)).DeleteBucket([]byte(id)) - - return nil -} - -func (s *Store) AddLink(id string, link solver.CacheInfoLink, target string) error { - return s.db.Update(func(tx *bolt.Tx) error { - b, err := tx.Bucket([]byte(linksBucket)).CreateBucketIfNotExists([]byte(id)) - if err != nil { - return err - } - - dt, err := json.Marshal(link) - if err != nil { - return err - } - - if err := b.Put(bytes.Join([][]byte{dt, []byte(target)}, []byte("@")), []byte{}); err != nil { - return err - } - - b, err = tx.Bucket([]byte(backlinksBucket)).CreateBucketIfNotExists([]byte(target)) - if err != nil { - return err - } - - if err := b.Put([]byte(id), []byte{}); err != nil { - return err - } - - return nil - }) -} - -func (s *Store) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error { - var links []string - if err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(linksBucket)) - if b == nil { - return nil - } - b = b.Bucket([]byte(id)) - if b == nil { - return nil - } - - dt, err := json.Marshal(link) - if err != nil { - return err - } - index := bytes.Join([][]byte{dt, {}}, []byte("@")) - c := b.Cursor() - k, _ := c.Seek([]byte(index)) - for { - if k != nil && bytes.HasPrefix(k, index) { - target := bytes.TrimPrefix(k, index) - links = append(links, string(target)) - k, _ = c.Next() - } else { - break - } - } - - return nil - }); err != nil { - return err - } - for _, l := range links { - if err := fn(l); err != nil { - return err - } - } - return nil -} - -func (s *Store) HasLink(id string, link solver.CacheInfoLink, target string) bool { - var v bool - if err := s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(linksBucket)) - if b == nil { - return nil - } - b = b.Bucket([]byte(id)) - if b == nil { - return nil - } - - dt, err := json.Marshal(link) - if err != nil { - return err - } - v = b.Get(bytes.Join([][]byte{dt, []byte(target)}, []byte("@"))) != nil - return nil - }); err != nil { - return false - } - return v -} - -func (s *Store) WalkBacklinks(id string, fn func(id string, link solver.CacheInfoLink) error) error { - var outIDs []string - var outLinks []solver.CacheInfoLink - - if err := s.db.View(func(tx *bolt.Tx) error { - links := tx.Bucket([]byte(linksBucket)) - if links == nil { - return nil - } - backLinks := tx.Bucket([]byte(backlinksBucket)) - if backLinks == nil { - return nil - } - b := backLinks.Bucket([]byte(id)) - if b == nil { - return nil - } - - 
if err := b.ForEach(func(bid, v []byte) error { - b = links.Bucket(bid) - if b == nil { - return nil - } - if err := b.ForEach(func(k, v []byte) error { - parts := bytes.Split(k, []byte("@")) - if len(parts) == 2 { - if string(parts[1]) != id { - return nil - } - var l solver.CacheInfoLink - if err := json.Unmarshal(parts[0], &l); err != nil { - return err - } - l.Digest = digest.FromBytes([]byte(fmt.Sprintf("%s@%d", l.Digest, l.Output))) - l.Output = 0 - outIDs = append(outIDs, string(bid)) - outLinks = append(outLinks, l) - } - return nil - }); err != nil { - return err - } - return nil - }); err != nil { - return err - } - - return nil - }); err != nil { - return err - } - - for i := range outIDs { - if err := fn(outIDs[i], outLinks[i]); err != nil { - return err - } - } - return nil -} - -func isEmptyBucket(b *bolt.Bucket) bool { - if b == nil { - return true - } - k, _ := b.Cursor().First() - return k == nil -} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/allowempty.s b/vendor/github.com/moby/buildkit/util/network/cniprovider/allowempty.s deleted file mode 100644 index e69de29b..00000000 diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go deleted file mode 100644 index 5346654e..00000000 --- a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go +++ /dev/null @@ -1,120 +0,0 @@ -package cniprovider - -import ( - "context" - "os" - "path/filepath" - "syscall" - - "github.com/containerd/containerd/oci" - "github.com/containerd/go-cni" - "github.com/gofrs/flock" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/util/network" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -type Opt struct { - Root string - ConfigPath string - BinaryDir string -} - -func New(opt Opt) (network.Provider, error) { - if _, err := os.Stat(opt.ConfigPath); err != nil { - return nil, errors.Wrapf(err, "failed to read cni config %q", opt.ConfigPath) - } - if _, err := os.Stat(opt.BinaryDir); err != nil { - return nil, errors.Wrapf(err, "failed to read cni binary dir %q", opt.BinaryDir) - } - - cniHandle, err := cni.New( - cni.WithMinNetworkCount(2), - cni.WithConfFile(opt.ConfigPath), - cni.WithPluginDir([]string{opt.BinaryDir}), - cni.WithLoNetwork, - cni.WithInterfacePrefix(("eth"))) - if err != nil { - return nil, err - } - - if err != nil { - return nil, err - } - - cp := &cniProvider{CNI: cniHandle, root: opt.Root} - if err := cp.initNetwork(); err != nil { - return nil, err - } - return cp, nil -} - -type cniProvider struct { - cni.CNI - root string -} - -func (c *cniProvider) initNetwork() error { - if v := os.Getenv("BUILDKIT_CNI_INIT_LOCK_PATH"); v != "" { - l := flock.New(v) - if err := l.Lock(); err != nil { - return err - } - defer l.Unlock() - } - ns, err := c.New() - if err != nil { - return err - } - return ns.Close() -} - -func (c *cniProvider) New() (network.Namespace, error) { - id := identity.NewID() - nsPath := filepath.Join(c.root, "net/cni", id) - if err := os.MkdirAll(filepath.Dir(nsPath), 0700); err != nil { - return nil, err - } - - if err := createNetNS(nsPath); err != nil { - os.RemoveAll(filepath.Dir(nsPath)) - return nil, err - } - - if _, err := c.CNI.Setup(context.TODO(), id, nsPath); err != nil { - os.RemoveAll(filepath.Dir(nsPath)) - return nil, errors.Wrap(err, "CNI setup error") - } - - return &cniNS{path: nsPath, id: id, handle: c.CNI}, nil -} - -type cniNS struct { - 
handle cni.CNI - id string - path string -} - -func (ns *cniNS) Set(s *specs.Spec) { - oci.WithLinuxNamespace(specs.LinuxNamespace{ - Type: specs.NetworkNamespace, - Path: ns.path, - })(nil, nil, nil, s) -} - -func (ns *cniNS) Close() error { - err := ns.handle.Remove(context.TODO(), ns.id, ns.path) - - if err1 := unix.Unmount(ns.path, unix.MNT_DETACH); err1 != nil { - if err1 != syscall.EINVAL && err1 != syscall.ENOENT && err == nil { - err = errors.Wrap(err1, "error unmounting network namespace") - } - } - if err1 := os.RemoveAll(filepath.Dir(ns.path)); err1 != nil && !os.IsNotExist(err1) && err == nil { - err = errors.Wrap(err, "error removing network namespace") - } - - return err -} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go deleted file mode 100644 index 99490813..00000000 --- a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build linux - -package cniprovider - -import ( - _ "unsafe" // required for go:linkname. -) - -//go:linkname beforeFork syscall.runtime_BeforeFork -func beforeFork() - -//go:linkname afterFork syscall.runtime_AfterFork -func afterFork() - -//go:linkname afterForkInChild syscall.runtime_AfterForkInChild -func afterForkInChild() diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go deleted file mode 100644 index 87012cf8..00000000 --- a/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build linux - -package cniprovider - -import ( - "os" - "syscall" - "unsafe" - - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func createNetNS(p string) error { - f, err := os.Create(p) - if err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - procNetNSBytes, err := syscall.BytePtrFromString("/proc/self/ns/net") - if err != nil { - return err - } - pBytes, err := syscall.BytePtrFromString(p) - if err != nil { - return err - } - beforeFork() - - pid, _, errno := syscall.RawSyscall6(syscall.SYS_CLONE, uintptr(syscall.SIGCHLD)|unix.CLONE_NEWNET, 0, 0, 0, 0, 0) - if errno != 0 { - afterFork() - return errno - } - - if pid != 0 { - afterFork() - var ws unix.WaitStatus - _, err = unix.Wait4(int(pid), &ws, 0, nil) - for err == syscall.EINTR { - _, err = unix.Wait4(int(pid), &ws, 0, nil) - } - - if err != nil { - return errors.Wrapf(err, "failed to find pid=%d process", pid) - } - errno = syscall.Errno(ws.ExitStatus()) - if errno != 0 { - return errors.Wrap(errno, "failed to mount") - } - return nil - } - afterForkInChild() - _, _, errno = syscall.RawSyscall6(syscall.SYS_MOUNT, uintptr(unsafe.Pointer(procNetNSBytes)), uintptr(unsafe.Pointer(pBytes)), 0, uintptr(unix.MS_BIND), 0, 0) - syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0) - panic("unreachable") -} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_nolinux.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_nolinux.go deleted file mode 100644 index 360ffb27..00000000 --- a/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_nolinux.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !linux - -package cniprovider - -import "github.com/pkg/errors" - -func createNetNS(p string) error { - return errors.Errorf("creating netns for cni not supported") -} diff --git 
a/vendor/github.com/moby/buildkit/util/network/netproviders/network.go b/vendor/github.com/moby/buildkit/util/network/netproviders/network.go deleted file mode 100644 index 7b0b765b..00000000 --- a/vendor/github.com/moby/buildkit/util/network/netproviders/network.go +++ /dev/null @@ -1,50 +0,0 @@ -package netproviders - -import ( - "os" - - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/network" - "github.com/moby/buildkit/util/network/cniprovider" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type Opt struct { - CNI cniprovider.Opt - Mode string -} - -// Providers returns the network provider set -func Providers(opt Opt) (map[pb.NetMode]network.Provider, error) { - var defaultProvider network.Provider - switch opt.Mode { - case "cni": - cniProvider, err := cniprovider.New(opt.CNI) - if err != nil { - return nil, err - } - defaultProvider = cniProvider - case "host": - defaultProvider = network.NewHostProvider() - case "auto", "": - if _, err := os.Stat(opt.CNI.ConfigPath); err == nil { - cniProvider, err := cniprovider.New(opt.CNI) - if err != nil { - return nil, err - } - defaultProvider = cniProvider - } else { - logrus.Warnf("using host network as the default") - defaultProvider = network.NewHostProvider() - } - default: - return nil, errors.Errorf("invalid network mode: %q", opt.Mode) - } - - return map[pb.NetMode]network.Provider{ - pb.NetMode_UNSET: defaultProvider, - pb.NetMode_HOST: network.NewHostProvider(), - pb.NetMode_NONE: network.NewNoneProvider(), - }, nil -} diff --git a/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go b/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go deleted file mode 100644 index 12646e43..00000000 --- a/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go +++ /dev/null @@ -1,40 +0,0 @@ -package specconv - -import ( - "strings" - - "github.com/opencontainers/runtime-spec/specs-go" -) - -// ToRootless converts spec to be compatible with "rootless" runc. -// * Remove /sys mount -// * Remove cgroups -// -// See docs/rootless.md for the supported runc revision. -func ToRootless(spec *specs.Spec) error { - // Remove /sys mount because we can't mount /sys when the daemon netns - // is not unshared from the host. - // - // Instead, we could bind-mount /sys from the host, however, `rbind, ro` - // does not make /sys/fs/cgroup read-only (and we can't bind-mount /sys - // without rbind) - // - // PR for making /sys/fs/cgroup read-only is proposed, but it is very - // complicated: https://github.com/opencontainers/runc/pull/1869 - // - // For buildkit usecase, we suppose we don't need to provide /sys to - // containers and remove /sys mount as a workaround. 
- var mounts []specs.Mount - for _, mount := range spec.Mounts { - if strings.HasPrefix(mount.Destination, "/sys") { - continue - } - mounts = append(mounts, mount) - } - spec.Mounts = mounts - - // Remove cgroups so as to avoid `container_linux.go:337: starting container process caused "process_linux.go:280: applying cgroup configuration for process caused \"mkdir /sys/fs/cgroup/cpuset/buildkit: permission denied\""` - spec.Linux.Resources = nil - spec.Linux.CgroupsPath = "" - return nil -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 71662633..fa0cda5a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -61,8 +61,6 @@ github.com/chuckpreslar/emission github.com/codegangsta/inject # github.com/containerd/cgroups v0.0.0-20200217135630-d732e370d46d github.com/containerd/cgroups/stats/v1 -# github.com/containerd/console v0.0.0-20191219165238-8375c3424e4d -github.com/containerd/console # github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc => github.com/containerd/containerd v1.3.1-0.20200227195959-4d242818bf55 ## explicit github.com/containerd/containerd/api/services/content/v1 @@ -103,7 +101,6 @@ github.com/containerd/containerd/rootfs github.com/containerd/containerd/services/content/contentserver github.com/containerd/containerd/snapshots github.com/containerd/containerd/snapshots/native -github.com/containerd/containerd/snapshots/overlay github.com/containerd/containerd/snapshots/storage github.com/containerd/containerd/sys github.com/containerd/containerd/version @@ -111,21 +108,10 @@ github.com/containerd/containerd/version github.com/containerd/continuity/fs github.com/containerd/continuity/syscallx github.com/containerd/continuity/sysx -# github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75 -github.com/containerd/go-cni -# github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328 -github.com/containerd/go-runc # github.com/containerd/ttrpc v0.0.0-20200121165050-0be804eadb15 github.com/containerd/ttrpc # github.com/containerd/typeurl v0.0.0-20200205145503-b45ef1f1f737 github.com/containerd/typeurl -# github.com/containernetworking/cni v0.7.1 -github.com/containernetworking/cni/libcni -github.com/containernetworking/cni/pkg/invoke -github.com/containernetworking/cni/pkg/types -github.com/containernetworking/cni/pkg/types/020 -github.com/containernetworking/cni/pkg/types/current -github.com/containernetworking/cni/pkg/version # github.com/cpuguy83/go-md2man/v2 v2.0.0 github.com/cpuguy83/go-md2man/v2/md2man # github.com/crillab/gophersat v1.3.2-0.20201023142334-3fc2ac466765 @@ -142,6 +128,7 @@ github.com/docker/cli/cli/config/configfile github.com/docker/cli/cli/config/credentials github.com/docker/cli/cli/config/types # github.com/docker/distribution v2.7.1+incompatible +## explicit github.com/docker/distribution github.com/docker/distribution/digestset github.com/docker/distribution/manifest @@ -162,7 +149,6 @@ github.com/docker/docker/api/types/strslice github.com/docker/docker/api/types/swarm github.com/docker/docker/api/types/swarm/runtime github.com/docker/docker/api/types/versions -github.com/docker/docker/builder/dockerignore github.com/docker/docker/errdefs github.com/docker/docker/pkg/archive github.com/docker/docker/pkg/chrootarchive @@ -175,7 +161,6 @@ github.com/docker/docker/pkg/locker github.com/docker/docker/pkg/longpath github.com/docker/docker/pkg/pools github.com/docker/docker/pkg/reexec -github.com/docker/docker/pkg/signal github.com/docker/docker/pkg/stdcopy github.com/docker/docker/pkg/system 
github.com/docker/docker/pkg/term @@ -208,7 +193,6 @@ github.com/fsnotify/fsnotify github.com/fsouza/go-dockerclient # github.com/genuinetools/img v0.5.11 ## explicit -github.com/genuinetools/img/client github.com/genuinetools/img/types # github.com/ghodss/yaml v1.0.0 ## explicit @@ -382,23 +366,18 @@ github.com/moby/buildkit/cache/blobs github.com/moby/buildkit/cache/contenthash github.com/moby/buildkit/cache/metadata github.com/moby/buildkit/cache/remotecache -github.com/moby/buildkit/cache/remotecache/inline -github.com/moby/buildkit/cache/remotecache/local -github.com/moby/buildkit/cache/remotecache/registry github.com/moby/buildkit/cache/remotecache/v1 github.com/moby/buildkit/cache/util github.com/moby/buildkit/client github.com/moby/buildkit/client/buildid github.com/moby/buildkit/client/connhelper github.com/moby/buildkit/client/llb -github.com/moby/buildkit/client/llb/imagemetaresolver github.com/moby/buildkit/client/ociindex github.com/moby/buildkit/cmd/buildkitd/config github.com/moby/buildkit/control github.com/moby/buildkit/control/gateway github.com/moby/buildkit/executor github.com/moby/buildkit/executor/oci -github.com/moby/buildkit/executor/runcexecutor github.com/moby/buildkit/exporter github.com/moby/buildkit/exporter/containerimage github.com/moby/buildkit/exporter/containerimage/exptypes @@ -406,15 +385,8 @@ github.com/moby/buildkit/exporter/local github.com/moby/buildkit/exporter/oci github.com/moby/buildkit/exporter/tar github.com/moby/buildkit/frontend -github.com/moby/buildkit/frontend/dockerfile/builder -github.com/moby/buildkit/frontend/dockerfile/command -github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb -github.com/moby/buildkit/frontend/dockerfile/instructions -github.com/moby/buildkit/frontend/dockerfile/parser -github.com/moby/buildkit/frontend/dockerfile/shell github.com/moby/buildkit/frontend/gateway github.com/moby/buildkit/frontend/gateway/client -github.com/moby/buildkit/frontend/gateway/forwarder github.com/moby/buildkit/frontend/gateway/grpcclient github.com/moby/buildkit/frontend/gateway/pb github.com/moby/buildkit/identity @@ -432,7 +404,6 @@ github.com/moby/buildkit/snapshot github.com/moby/buildkit/snapshot/containerd github.com/moby/buildkit/snapshot/imagerefchecker github.com/moby/buildkit/solver -github.com/moby/buildkit/solver/bboltcachestorage github.com/moby/buildkit/solver/internal/pipe github.com/moby/buildkit/solver/llbsolver github.com/moby/buildkit/solver/llbsolver/file @@ -457,14 +428,11 @@ github.com/moby/buildkit/util/flightcontrol github.com/moby/buildkit/util/imageutil github.com/moby/buildkit/util/leaseutil github.com/moby/buildkit/util/network -github.com/moby/buildkit/util/network/cniprovider -github.com/moby/buildkit/util/network/netproviders github.com/moby/buildkit/util/progress github.com/moby/buildkit/util/progress/logs github.com/moby/buildkit/util/pull github.com/moby/buildkit/util/push github.com/moby/buildkit/util/resolver -github.com/moby/buildkit/util/rootless/specconv github.com/moby/buildkit/util/system github.com/moby/buildkit/util/throttle github.com/moby/buildkit/util/tracing @@ -553,6 +521,7 @@ github.com/openSUSE/umoci/third_party/user # github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/go-digest # github.com/opencontainers/image-spec v1.0.1 +## explicit github.com/opencontainers/image-spec/identity github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 @@ -685,7 +654,6 @@ golang.org/x/net/trace golang.org/x/oauth2 golang.org/x/oauth2/internal 
# golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 -## explicit golang.org/x/sync/errgroup golang.org/x/sync/semaphore # golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02