Support priv/unpriv image extraction

Optionally add back privileged extraction, which can be enabled with
LUET_PRIVILEGED_EXTRACT=true

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
Author: Ettore Di Giacinto
Date: 2021-06-16 23:29:23 +02:00
parent 8780e4f16f
commit 92e18d5782
663 changed files with 157764 additions and 203 deletions
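The commit message describes an environment toggle; the sketch below shows the general pattern of gating a privileged code path on such a variable. This is an illustrative, hedged example: the helper name and the exact parsing rules are assumptions, not luet's actual implementation.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// privilegedExtractEnabled is a hypothetical helper: it treats any value that
// strconv.ParseBool accepts as true ("true", "1", ...) as enabling the
// privileged path, and everything else (including unset) as disabling it.
func privilegedExtractEnabled() bool {
	v, err := strconv.ParseBool(os.Getenv("LUET_PRIVILEGED_EXTRACT"))
	return err == nil && v
}

func main() {
	if privilegedExtractEnabled() {
		fmt.Println("using privileged (chroot-based) extraction")
	} else {
		fmt.Println("using unprivileged extraction")
	}
}
```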

vendor/github.com/docker/cli/AUTHORS generated vendored

@@ -8,6 +8,7 @@ Aaron.L.Xu <likexu@harmonycloud.cn>
Abdur Rehman <abdur_rehman@mentor.com>
Abhinandan Prativadi <abhi@docker.com>
Abin Shahab <ashahab@altiscale.com>
Abreto FU <public@abreto.email>
Ace Tang <aceapril@126.com>
Addam Hardy <addam.hardy@gmail.com>
Adolfo Ochagavía <aochagavia92@gmail.com>
@@ -17,12 +18,15 @@ Adrien Folie <folie.adrien@gmail.com>
Ahmet Alp Balkan <ahmetb@microsoft.com>
Aidan Feldman <aidan.feldman@gmail.com>
Aidan Hobson Sayers <aidanhs@cantab.net>
AJ Bowen <aj@gandi.net>
Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
AJ Bowen <aj@soulshake.net>
Akhil Mohan <akhil.mohan@mayadata.io>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Akim Demaille <akim.demaille@docker.com>
Alan Thompson <cloojure@gmail.com>
Albert Callarisa <shark234@gmail.com>
Albin Kerouanton <albin@akerouanton.name>
Aleksa Sarai <asarai@suse.de>
Aleksander Piotrowski <apiotrowski312@gmail.com>
Alessandro Boch <aboch@tetrationanalytics.com>
Alex Mavrogiannis <alex.mavrogiannis@docker.com>
Alex Mayer <amayer5125@gmail.com>
@@ -40,6 +44,7 @@ Amir Goldstein <amir73il@aquasec.com>
Amit Krishnan <amit.krishnan@oracle.com>
Amit Shukla <amit.shukla@docker.com>
Amy Lindburg <amy.lindburg@docker.com>
Anca Iordache <anca.iordache@docker.com>
Anda Xu <anda.xu@docker.com>
Andrea Luzzardi <aluzzardi@gmail.com>
Andreas Köhler <andi5.py@gmx.net>
@@ -49,6 +54,7 @@ Andrew Macpherson <hopscotch23@gmail.com>
Andrew McDonnell <bugs@andrewmcdonnell.net>
Andrew Po <absourd.noise@gmail.com>
Andrey Petrov <andrey.petrov@shazow.net>
Andrii Berehuliak <berkusandrew@gmail.com>
André Martins <aanm90@gmail.com>
Andy Goldstein <agoldste@redhat.com>
Andy Rothfusz <github@developersupport.net>
@@ -61,7 +67,9 @@ Antonis Kalipetis <akalipetis@gmail.com>
Anusha Ragunathan <anusha.ragunathan@docker.com>
Ao Li <la9249@163.com>
Arash Deshmeh <adeshmeh@ca.ibm.com>
Arko Dasgupta <arko.dasgupta@docker.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arthur Peka <arthur.peka@outlook.com>
Ashwini Oruganti <ashwini.oruganti@gmail.com>
Azat Khuyiyakhmetov <shadow_uz@mail.ru>
Bardia Keyoumarsi <bkeyouma@ucsc.edu>
@@ -87,6 +95,7 @@ Brent Salisbury <brent.salisbury@docker.com>
Bret Fisher <bret@bretfisher.com>
Brian (bex) Exelbierd <bexelbie@redhat.com>
Brian Goff <cpuguy83@gmail.com>
Brian Wieder <brian@4wieders.com>
Bryan Bess <squarejaw@bsbess.com>
Bryan Boreham <bjboreham@gmail.com>
Bryan Murphy <bmurphy1976@gmail.com>
@@ -95,6 +104,7 @@ Cameron Spear <cameronspear@gmail.com>
Cao Weiwei <cao.weiwei30@zte.com.cn>
Carlo Mion <mion00@gmail.com>
Carlos Alexandro Becker <caarlos0@gmail.com>
Carlos de Paula <me@carlosedp.com>
Ce Gao <ce.gao@outlook.com>
Cedric Davies <cedricda@microsoft.com>
Cezar Sa Espinola <cezarsa@gmail.com>
@@ -128,26 +138,31 @@ Coenraad Loubser <coenraad@wish.org.za>
Colin Hebert <hebert.colin@gmail.com>
Collin Guarino <collin.guarino@gmail.com>
Colm Hally <colmhally@gmail.com>
Comical Derskeal <27731088+derskeal@users.noreply.github.com>
Corey Farrell <git@cfware.com>
Corey Quon <corey.quon@docker.com>
Craig Wilhite <crwilhit@microsoft.com>
Cristian Staretu <cristian.staretu@gmail.com>
Daehyeok Mun <daehyeok@gmail.com>
Dafydd Crosby <dtcrsby@gmail.com>
Daisuke Ito <itodaisuke00@gmail.com>
dalanlan <dalanlan925@gmail.com>
Damien Nadé <github@livna.org>
Dan Cotora <dan@bluevision.ro>
Daniel Artine <daniel.artine@ufrj.br>
Daniel Cassidy <mail@danielcassidy.me.uk>
Daniel Dao <dqminh@cloudflare.com>
Daniel Farrell <dfarrell@redhat.com>
Daniel Gasienica <daniel@gasienica.ch>
Daniel Goosen <daniel.goosen@surveysampling.com>
Daniel Helfand <dhelfand@redhat.com>
Daniel Hiltgen <daniel.hiltgen@docker.com>
Daniel J Walsh <dwalsh@redhat.com>
Daniel Nephin <dnephin@docker.com>
Daniel Norberg <dano@spotify.com>
Daniel Watkins <daniel@daniel-watkins.co.uk>
Daniel Zhang <jmzwcn@gmail.com>
Daniil Nikolenko <qoo2p5@gmail.com>
Danny Berger <dpb587@gmail.com>
Darren Shepherd <darren.s.shepherd@gmail.com>
Darren Stahl <darst@microsoft.com>
@@ -180,13 +195,15 @@ Dima Stopel <dima@twistlock.com>
Dimitry Andric <d.andric@activevideo.com>
Ding Fei <dingfei@stars.org.cn>
Diogo Monica <diogo@docker.com>
Djordje Lukic <djordje.lukic@docker.com>
Dmitry Gusev <dmitry.gusev@gmail.com>
Dmitry Smirnov <onlyjob@member.fsf.org>
Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
Dominik Braun <dominik.braun@nbsp.de>
Don Kjer <don.kjer@gmail.com>
Dong Chen <dongluo.chen@docker.com>
Doug Davis <dug@us.ibm.com>
Drew Erny <drew.erny@docker.com>
Drew Erny <derny@mirantis.com>
Ed Costello <epc@epcostello.com>
Elango Sivanandam <elango.siva@docker.com>
Eli Uriegas <eli.uriegas@docker.com>
@@ -249,6 +266,7 @@ Harald Albers <github@albersweb.de>
Harold Cooper <hrldcpr@gmail.com>
Harry Zhang <harryz@hyper.sh>
He Simei <hesimei@zju.edu.cn>
Hector S <hfsam88@gmail.com>
Helen Xie <chenjg@harmonycloud.cn>
Henning Sprang <henning.sprang@gmail.com>
Henry N <henrynmail-github@yahoo.de>
@@ -256,6 +274,7 @@ Hernan Garcia <hernandanielg@gmail.com>
Hongbin Lu <hongbin034@gmail.com>
Hu Keping <hukeping@huawei.com>
Huayi Zhang <irachex@gmail.com>
Hugo Gabriel Eyherabide <hugogabriel.eyherabide@gmail.com>
huqun <huqun@zju.edu.cn>
Huu Nguyen <huu@prismskylabs.com>
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
@@ -297,7 +316,7 @@ Jeremy Unruh <jeremybunruh@gmail.com>
Jeremy Yallop <yallop@docker.com>
Jeroen Franse <jeroenfranse@gmail.com>
Jesse Adametz <jesseadametz@gmail.com>
Jessica Frazelle <jessfraz@google.com>
Jessica Frazelle <jess@oxide.computer>
Jezeniel Zapanta <jpzapanta22@gmail.com>
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
Jie Luo <luo612@zju.edu.cn>
@@ -308,6 +327,7 @@ Jimmy Song <rootsongjc@gmail.com>
jimmyxian <jimmyxian2004@yahoo.com.cn>
Jintao Zhang <zhangjintao9020@gmail.com>
Joao Fernandes <joao.fernandes@docker.com>
Joe Abbey <joe.abbey@gmail.com>
Joe Doliner <jdoliner@pachyderm.io>
Joe Gordon <joe.gordon0@gmail.com>
Joel Handwell <joelhandwell@gmail.com>
@@ -317,7 +337,7 @@ Johan Euphrosine <proppy@google.com>
Johannes 'fish' Ziemke <github@freigeist.org>
John Feminella <jxf@jxf.me>
John Harris <john@johnharris.io>
John Howard (VM) <John.Howard@microsoft.com>
John Howard <github@lowenna.com>
John Laswell <john.n.laswell@gmail.com>
John Maguire <jmaguire@duosecurity.com>
John Mulhausen <john@docker.com>
@@ -326,12 +346,15 @@ John Stephens <johnstep@docker.com>
John Tims <john.k.tims@gmail.com>
John V. Martinez <jvmatl@gmail.com>
John Willis <john.willis@docker.com>
Jon Johnson <jonjohnson@google.com>
Jonatas Baldin <jonatas.baldin@gmail.com>
Jonathan Boulle <jonathanboulle@gmail.com>
Jonathan Lee <jonjohn1232009@gmail.com>
Jonathan Lomas <jonathan@floatinglomas.ca>
Jonathan McCrohan <jmccrohan@gmail.com>
Jonh Wendell <jonh.wendell@redhat.com>
Jordan Jennings <jjn2009@gmail.com>
Jose J. Escobar <53836904+jescobar-docker@users.noreply.github.com>
Joseph Kern <jkern@semafour.net>
Josh Bodah <jb3689@yahoo.com>
Josh Chorlton <jchorlton@gmail.com>
@@ -369,6 +392,7 @@ Kevin Kern <kaiwentan@harmonycloud.cn>
Kevin Kirsche <Kev.Kirsche+GitHub@gmail.com>
Kevin Meredith <kevin.m.meredith@gmail.com>
Kevin Richardson <kevin@kevinrichardson.co>
Kevin Woblick <mail@kovah.de>
khaled souf <khaled.souf@gmail.com>
Kim Eik <kim@heldig.org>
Kir Kolyshkin <kolyshkin@gmail.com>
@@ -406,13 +430,16 @@ Luca Favatella <luca.favatella@erlang-solutions.com>
Luca Marturana <lucamarturana@gmail.com>
Lucas Chan <lucas-github@lucaschan.com>
Luka Hartwig <mail@lukahartwig.de>
Lukas Heeren <lukas-heeren@hotmail.com>
Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
Lydell Manganti <LydellManganti@users.noreply.github.com>
Lénaïc Huard <lhuard@amadeus.com>
Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
Mabin <bin.ma@huawei.com>
Maciej Kalisz <maciej.d.kalisz@gmail.com>
Madhav Puri <madhav.puri@gmail.com>
Madhu Venugopal <madhu@socketplane.io>
Madhur Batra <madhurbatra097@gmail.com>
Malte Janduda <mail@janduda.net>
Manjunath A Kumatagi <mkumatag@in.ibm.com>
Mansi Nahar <mmn4185@rit.edu>
@@ -422,6 +449,7 @@ Marco Mariani <marco.mariani@alterway.fr>
Marco Vedovati <mvedovati@suse.com>
Marcus Martins <marcus@docker.com>
Marianna Tessel <mtesselh@gmail.com>
Marius Ileana <marius.ileana@gmail.com>
Marius Sturm <marius@graylog.com>
Mark Oates <fl0yd@me.com>
Marsh Macy <marsma@microsoft.com>
@@ -467,12 +495,14 @@ mikelinjie <294893458@qq.com>
Mikhail Vasin <vasin@cloud-tv.ru>
Milind Chawre <milindchawre@gmail.com>
Mindaugas Rukas <momomg@gmail.com>
Miroslav Gula <miroslav.gula@naytrolabs.com>
Misty Stanley-Jones <misty@docker.com>
Mohammad Banikazemi <mb@us.ibm.com>
Mohammed Aaqib Ansari <maaquib@gmail.com>
Mohini Anne Dsouza <mohini3917@gmail.com>
Moorthy RS <rsmoorthy@gmail.com>
Morgan Bauer <mbauer@us.ibm.com>
Morten Hekkvang <morten.hekkvang@sbab.se>
Moysés Borges <moysesb@gmail.com>
Mrunal Patel <mrunalp@gmail.com>
muicoder <muicoder@gmail.com>
@@ -503,9 +533,11 @@ Nishant Totla <nishanttotla@gmail.com>
NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
Noah Treuhaft <noah.treuhaft@docker.com>
O.S. Tezer <ostezer@gmail.com>
Odin Ugedal <odin@ugedal.com>
ohmystack <jun.jiang02@ele.me>
Olle Jonsson <olle.jonsson@gmail.com>
Olli Janatuinen <olli.janatuinen@gmail.com>
Oscar Wieman <oscrx@icloud.com>
Otto Kekäläinen <otto@seravo.fi>
Ovidio Mallo <ovidio.mallo@gmail.com>
Pascal Borreli <pascal@borreli.com>
@@ -515,6 +547,7 @@ Patrick Lang <plang@microsoft.com>
Paul <paul9869@gmail.com>
Paul Kehrer <paul.l.kehrer@gmail.com>
Paul Lietar <paul@lietar.net>
Paul Mulders <justinkb@gmail.com>
Paul Weaver <pauweave@cisco.com>
Pavel Pospisil <pospispa@gmail.com>
Paweł Szczekutowicz <pszczekutowicz@gmail.com>
@@ -541,6 +574,7 @@ Qiang Huang <h.huangqiang@huawei.com>
Qinglan Peng <qinglanpeng@zju.edu.cn>
qudongfang <qudongfang@gmail.com>
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Rahul Zoldyck <rahulzoldyck@gmail.com>
Ravi Shekhar Jethani <rsjethani@gmail.com>
Ray Tsang <rayt@google.com>
Reficul <xuzhenglun@gmail.com>
@@ -553,6 +587,7 @@ Richard Scothern <richard.scothern@gmail.com>
Rick Wieman <git@rickw.nl>
Ritesh H Shukla <sritesh@vmware.com>
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
Rob Gulewich <rgulewich@netflix.com>
Robert Wallis <smilingrob@gmail.com>
Robin Naundorf <r.naundorf@fh-muenster.de>
Robin Speekenbrink <robin@kingsquare.nl>
@@ -574,10 +609,14 @@ Sainath Grandhi <sainath.grandhi@intel.com>
Sakeven Jiang <jc5930@sina.cn>
Sally O'Malley <somalley@redhat.com>
Sam Neirinck <sam@samneirinck.com>
Samarth Shah <samashah@microsoft.com>
Sambuddha Basu <sambuddhabasu1@gmail.com>
Sami Tabet <salph.tabet@gmail.com>
Samuel Cochran <sj26@sj26.com>
Samuel Karp <skarp@amazon.com>
Santhosh Manohar <santhosh@docker.com>
Sargun Dhillon <sargun@netflix.com>
Saswat Bhattacharya <sas.saswat@gmail.com>
Scott Brenner <scott@scottbrenner.me>
Scott Collier <emailscottcollier@gmail.com>
Sean Christopherson <sean.j.christopherson@intel.com>
@@ -598,6 +637,7 @@ sidharthamani <sid@rancher.com>
Silvin Lubecki <silvin.lubecki@docker.com>
Simei He <hesimei@zju.edu.cn>
Simon Ferquel <simon.ferquel@docker.com>
Simon Heimberg <simon.heimberg@heimberg-ea.ch>
Sindhu S <sindhus@live.in>
Slava Semushin <semushin@redhat.com>
Solomon Hykes <solomon@docker.com>
@@ -627,7 +667,10 @@ TAGOMORI Satoshi <tagomoris@gmail.com>
taiji-tech <csuhqg@foxmail.com>
Taylor Jones <monitorjbl@gmail.com>
Tejaswini Duggaraju <naduggar@microsoft.com>
Tengfei Wang <tfwang@alauda.io>
Teppei Fukuda <knqyf263@gmail.com>
Thatcher Peskens <thatcher@docker.com>
Thibault Coupin <thibault.coupin@gmail.com>
Thomas Gazagnaire <thomas@gazagnaire.org>
Thomas Krzero <thomas.kovatchitch@gmail.com>
Thomas Leonard <thomas.leonard@docker.com>
@@ -639,6 +682,7 @@ Tianyi Wang <capkurmagati@gmail.com>
Tibor Vass <teabee89@gmail.com>
Tim Dettrick <t.dettrick@uq.edu.au>
Tim Hockin <thockin@google.com>
Tim Sampson <tim@sampson.fi>
Tim Smith <timbot@google.com>
Tim Waugh <twaugh@redhat.com>
Tim Wraight <tim.wraight@tangentlabs.co.uk>
@@ -663,9 +707,11 @@ Tristan Carel <tristan@cogniteev.com>
Tycho Andersen <tycho@docker.com>
Tycho Andersen <tycho@tycho.ws>
uhayate <uhayate.gong@daocloud.io>
Ulrich Bareth <ulrich.bareth@gmail.com>
Ulysses Souza <ulysses.souza@docker.com>
Umesh Yadav <umesh4257@gmail.com>
Valentin Lorentz <progval+git@progval.net>
Venkateswara Reddy Bukkasamudram <bukkasamudram@outlook.com>
Veres Lajos <vlajos@gmail.com>
Victor Vieux <victor.vieux@docker.com>
Victoria Bialas <victoria.bialas@docker.com>
@@ -683,6 +729,7 @@ Wang Long <long.wanglong@huawei.com>
Wang Ping <present.wp@icloud.com>
Wang Xing <hzwangxing@corp.netease.com>
Wang Yuexiao <wang.yuexiao@zte.com.cn>
Wang Yumu <37442693@qq.com>
Wataru Ishida <ishida.wataru@lab.ntt.co.jp>
Wayne Song <wsong@docker.com>
Wen Cheng Ma <wenchma@cn.ibm.com>
@@ -691,6 +738,7 @@ Wes Morgan <cap10morgan@gmail.com>
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
William Henry <whenry@redhat.com>
Xianglin Gao <xlgao@zju.edu.cn>
Xiaodong Liu <liuxiaodong@loongson.cn>
Xiaodong Zhang <a4012017@sina.com>
Xiaoxi He <xxhe@alauda.io>
Xinbo Weng <xihuanbo_0521@zju.edu.cn>


@@ -6,6 +6,7 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
+	"sync"
 
 	"github.com/docker/cli/cli/config/configfile"
 	"github.com/docker/cli/cli/config/credentials"
@@ -23,10 +24,15 @@
 )
 
 var (
-	configDir = os.Getenv("DOCKER_CONFIG")
+	initConfigDir sync.Once
+	configDir     string
 )
 
-func init() {
+func setConfigDir() {
+	if configDir != "" {
+		return
+	}
+	configDir = os.Getenv("DOCKER_CONFIG")
 	if configDir == "" {
 		configDir = filepath.Join(homedir.Get(), configFileDir)
 	}
@@ -34,6 +40,7 @@ func init() {
 
 // Dir returns the directory the configuration file is stored in
 func Dir() string {
+	initConfigDir.Do(setConfigDir)
 	return configDir
 }
 
@@ -88,11 +95,7 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
 	configFile := configfile.New(filename)
 
 	// Try happy path first - latest config file
-	if _, err := os.Stat(filename); err == nil {
-		file, err := os.Open(filename)
-		if err != nil {
-			return configFile, errors.Wrap(err, filename)
-		}
+	if file, err := os.Open(filename); err == nil {
 		defer file.Close()
 		err = configFile.LoadFromReader(file)
 		if err != nil {
@@ -106,22 +109,16 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
 	}
 
 	// Can't find latest config file so check for the old one
-	homedir, err := os.UserHomeDir()
+	home, err := os.UserHomeDir()
 	if err != nil {
 		return configFile, errors.Wrap(err, oldConfigfile)
 	}
-	confFile := filepath.Join(homedir, oldConfigfile)
-	if _, err := os.Stat(confFile); err != nil {
-		return configFile, nil // missing file is not an error
-	}
-	file, err := os.Open(confFile)
-	if err != nil {
-		return configFile, errors.Wrap(err, filename)
-	}
-	defer file.Close()
-	err = configFile.LegacyLoadFromReader(file)
-	if err != nil {
-		return configFile, errors.Wrap(err, filename)
+	filename = filepath.Join(home, oldConfigfile)
+	if file, err := os.Open(filename); err == nil {
+		defer file.Close()
+		if err := configFile.LegacyLoadFromReader(file); err != nil {
+			return configFile, errors.Wrap(err, filename)
+		}
 	}
 	return configFile, nil
 }
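For reference, a minimal sketch of how a caller exercises the API touched by this hunk. It assumes only the exported Dir and Load functions and the ConfigFile.Filename field visible in this diff:

```go
package main

import (
	"fmt"
	"log"

	cliconfig "github.com/docker/cli/cli/config"
)

func main() {
	// Dir lazily resolves DOCKER_CONFIG (falling back to ~/.docker) via the
	// sync.Once added above; Load falls back to the legacy config file when
	// the current one is missing.
	cfg, err := cliconfig.Load(cliconfig.Dir())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("config loaded from", cfg.Filename)
}
```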


@@ -13,6 +13,7 @@ import (
 	"github.com/docker/cli/cli/config/credentials"
 	"github.com/docker/cli/cli/config/types"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -118,7 +119,7 @@ func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
 // LoadFromReader reads the configuration data given and sets up the auth config
 // information with given directory and populates the receiver object
 func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
-	if err := json.NewDecoder(configData).Decode(&configFile); err != nil {
+	if err := json.NewDecoder(configData).Decode(&configFile); err != nil && !errors.Is(err, io.EOF) {
 		return err
 	}
 	var err error
@@ -168,6 +169,13 @@ func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
 	configFile.AuthConfigs = tmpAuthConfigs
 	defer func() { configFile.AuthConfigs = saveAuthConfigs }()
 
+	// User-Agent header is automatically set, and should not be stored in the configuration
+	for v := range configFile.HTTPHeaders {
+		if strings.EqualFold(v, "User-Agent") {
+			delete(configFile.HTTPHeaders, v)
+		}
+	}
+
 	data, err := json.MarshalIndent(configFile, "", "\t")
 	if err != nil {
 		return err
@@ -177,7 +185,7 @@
 }
 
 // Save encodes and writes out all the authorization information
-func (configFile *ConfigFile) Save() error {
+func (configFile *ConfigFile) Save() (retErr error) {
 	if configFile.Filename == "" {
 		return errors.Errorf("Can't save config with empty filename")
 	}
@@ -190,16 +198,33 @@
 	if err != nil {
 		return err
 	}
+	defer func() {
+		temp.Close()
+		if retErr != nil {
+			if err := os.Remove(temp.Name()); err != nil {
+				logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file")
+			}
+		}
+	}()
+
 	err = configFile.SaveToWriter(temp)
-	temp.Close()
 	if err != nil {
-		os.Remove(temp.Name())
 		return err
 	}
-	// Try copying the current config file (if any) ownership and permissions
-	copyFilePermissions(configFile.Filename, temp.Name())
-	return os.Rename(temp.Name(), configFile.Filename)
+
+	if err := temp.Close(); err != nil {
+		return errors.Wrap(err, "error closing temp file")
+	}
+
+	// Handle situation where the configfile is a symlink
+	cfgFile := configFile.Filename
+	if f, err := os.Readlink(cfgFile); err == nil {
+		cfgFile = f
+	}
+
+	// Try copying the current config file (if any) ownership and permissions
+	copyFilePermissions(cfgFile, temp.Name())
+	return os.Rename(temp.Name(), cfgFile)
 }
 
 // ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
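The reworked Save writes to a temp file, then renames it over the (symlink-resolved) target, so a failed save can no longer truncate an existing config. A short usage sketch, assuming a config previously obtained from Load:

```go
package main

import (
	"log"

	cliconfig "github.com/docker/cli/cli/config"
)

func main() {
	cfg, err := cliconfig.Load(cliconfig.Dir())
	if err != nil {
		log.Fatal(err)
	}
	// Save writes to a temp file first and renames it into place; on error
	// the deferred cleanup added above removes the temp file.
	if err := cfg.Save(); err != nil {
		log.Fatal(err)
	}
}
```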


@@ -0,0 +1,106 @@
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/user"
"path/filepath"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
)
func init() {
// initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
// environment, not in the chroot from untrusted files.
_, _ = user.Lookup("docker")
_, _ = net.LookupHost("localhost")
}
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
func NewArchiver(idMapping *idtools.IdentityMapping) *archive.Archiver {
if idMapping == nil {
idMapping = &idtools.IdentityMapping{}
}
return &archive.Archiver{
Untar: Untar,
IDMapping: idMapping,
}
}
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return untarHandler(tarArchive, dest, options, true, dest)
}
// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory.
// The root directory is the directory that will be chrooted to.
// `dest` must be a path within `root`; if it is not, an error will be returned.
//
// `root` should be set to a directory which is not controlled by any potentially
// malicious process.
//
// This should be used to prevent a potential attacker from manipulating `dest`
// such that it would provide access to files outside of `dest` through things
// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
// sanitizing symlinks in this manner is inherently racy:
// ref: CVE-2018-15664
func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
return untarHandler(tarArchive, dest, options, true, root)
}
// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return untarHandler(tarArchive, dest, options, false, dest)
}
// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
if tarArchive == nil {
return fmt.Errorf("Empty archive")
}
if options == nil {
options = &archive.TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
rootIDs := idMapping.RootPair()
dest = filepath.Clean(dest)
if _, err := os.Stat(dest); os.IsNotExist(err) {
if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
return err
}
}
r := ioutil.NopCloser(tarArchive)
if decompress {
decompressedArchive, err := archive.DecompressStream(tarArchive)
if err != nil {
return err
}
defer decompressedArchive.Close()
r = decompressedArchive
}
return invokeUnpack(r, dest, options, root)
}
// Tar tars the requested path while chrooted to the specified root.
func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
if options == nil {
options = &archive.TarOptions{}
}
return invokePack(srcPath, options, root)
}
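A minimal sketch of how a privileged extraction path can drive this package; the archive path and destination directory are assumptions for the example, not values from the commit:

```go
package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
)

func main() {
	f, err := os.Open("layer.tar.gz") // example path, not from the commit
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Untar detects the compression automatically and unpacks the stream
	// while chrooted to the destination in a re-exec'd child process.
	if err := chrootarchive.Untar(f, "/tmp/rootfs", &archive.TarOptions{}); err != nil {
		log.Fatal(err)
	}
}
```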


@@ -0,0 +1,208 @@
// +build !windows
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
"github.com/pkg/errors"
)
// untar is the entry-point for docker-untar on re-exec. This is not used on
// Windows as it does not support chroot, hence no point sandboxing through
// chroot and reexec.
func untar() {
runtime.LockOSThread()
flag.Parse()
var options archive.TarOptions
// read the options from the pipe "ExtraFiles"
if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
fatal(err)
}
dst := flag.Arg(0)
var root string
if len(flag.Args()) > 1 {
root = flag.Arg(1)
}
if root == "" {
root = dst
}
if err := chroot(root); err != nil {
fatal(err)
}
if err := archive.Unpack(os.Stdin, dst, &options); err != nil {
fatal(err)
}
// fully consume stdin in case it is zero padded
if _, err := flush(os.Stdin); err != nil {
fatal(err)
}
os.Exit(0)
}
func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
if root == "" {
return errors.New("must specify a root to chroot to")
}
// We can't pass a potentially large exclude list directly via cmd line
// because we easily overrun the kernel's max argument/environment size
// when the full image list is passed (e.g. when this is used by
// `docker load`). We will marshal the options via a pipe to the
// child.
r, w, err := os.Pipe()
if err != nil {
return fmt.Errorf("Untar pipe failure: %v", err)
}
if root != "" {
relDest, err := filepath.Rel(root, dest)
if err != nil {
return err
}
if relDest == "." {
relDest = "/"
}
if relDest[0] != '/' {
relDest = "/" + relDest
}
dest = relDest
}
cmd := reexec.Command("docker-untar", dest, root)
cmd.Stdin = decompressedArchive
cmd.ExtraFiles = append(cmd.ExtraFiles, r)
output := bytes.NewBuffer(nil)
cmd.Stdout = output
cmd.Stderr = output
if err := cmd.Start(); err != nil {
w.Close()
return fmt.Errorf("Untar error on re-exec cmd: %v", err)
}
// write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
return fmt.Errorf("Untar json encode to pipe failed: %v", err)
}
w.Close()
if err := cmd.Wait(); err != nil {
// when `xz -d -c -q | docker-untar ...` failed on docker-untar side,
// we need to exhaust `xz`'s output, otherwise the `xz` side will be
// pending on write pipe forever
io.Copy(ioutil.Discard, decompressedArchive)
return fmt.Errorf("Error processing tar file(%v): %s", err, output)
}
return nil
}
func tar() {
runtime.LockOSThread()
flag.Parse()
src := flag.Arg(0)
var root string
if len(flag.Args()) > 1 {
root = flag.Arg(1)
}
if root == "" {
root = src
}
if err := realChroot(root); err != nil {
fatal(err)
}
var options archive.TarOptions
if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
fatal(err)
}
rdr, err := archive.TarWithOptions(src, &options)
if err != nil {
fatal(err)
}
defer rdr.Close()
if _, err := io.Copy(os.Stdout, rdr); err != nil {
fatal(err)
}
os.Exit(0)
}
func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
if root == "" {
return nil, errors.New("root path must not be empty")
}
relSrc, err := filepath.Rel(root, srcPath)
if err != nil {
return nil, err
}
if relSrc == "." {
relSrc = "/"
}
if relSrc[0] != '/' {
relSrc = "/" + relSrc
}
// make sure we didn't trim a trailing slash with the call to `Rel`
if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") {
relSrc += "/"
}
cmd := reexec.Command("docker-tar", relSrc, root)
errBuff := bytes.NewBuffer(nil)
cmd.Stderr = errBuff
tarR, tarW := io.Pipe()
cmd.Stdout = tarW
stdin, err := cmd.StdinPipe()
if err != nil {
return nil, errors.Wrap(err, "error getting options pipe for tar process")
}
if err := cmd.Start(); err != nil {
return nil, errors.Wrap(err, "tar error on re-exec cmd")
}
go func() {
err := cmd.Wait()
err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
tarW.CloseWithError(err)
}()
if err := json.NewEncoder(stdin).Encode(options); err != nil {
stdin.Close()
return nil, errors.Wrap(err, "tar json encode to pipe failed")
}
stdin.Close()
return tarR, nil
}
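The comment in invokeUnpack explains why options travel over a pipe instead of argv. The standalone sketch below demonstrates the same ExtraFiles pattern on Linux, with `cat /dev/fd/3` standing in for the re-exec'd child; it is not docker or luet code:

```go
package main

import (
	"encoding/json"
	"log"
	"os"
	"os/exec"
)

type tarOpts struct {
	ExcludePatterns []string
}

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		log.Fatal(err)
	}
	// The read end becomes fd 3 in the child (ExtraFiles[0]), so arbitrarily
	// large options never touch the size-limited argv/environment.
	cmd := exec.Command("cat", "/dev/fd/3") // stand-in for the re-exec'd child
	cmd.ExtraFiles = []*os.File{r}
	cmd.Stdout = os.Stdout
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	r.Close() // parent no longer needs the read end
	if err := json.NewEncoder(w).Encode(tarOpts{ExcludePatterns: []string{".git"}}); err != nil {
		log.Fatal(err)
	}
	w.Close() // EOF for the child
	if err := cmd.Wait(); err != nil {
		log.Fatal(err)
	}
}
```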


@@ -0,0 +1,29 @@
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import (
"io"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/longpath"
)
// chroot is not supported by Windows
func chroot(path string) error {
return nil
}
func invokeUnpack(decompressedArchive io.ReadCloser,
dest string,
options *archive.TarOptions, root string) error {
// Windows is different to Linux here because Windows does not support
// chroot. Hence there is no point sandboxing a chrooted process to
// do the unpack. We call inline instead within the daemon process.
return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
}
func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
// Windows is different to Linux here because Windows does not support
// chroot. Hence there is no point sandboxing a chrooted process to
// do the pack. We call inline instead within the daemon process.
return archive.TarWithOptions(srcPath, options)
}


@@ -0,0 +1,114 @@
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/moby/sys/mount"
"github.com/moby/sys/mountinfo"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"golang.org/x/sys/unix"
)
// chroot on linux uses pivot_root instead of chroot
// pivot_root takes a new root and an old root.
// Old root must be a sub-dir of new root; it is where the current rootfs will reside after the call to pivot_root.
// New root is where the new rootfs is set to.
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
// This is similar to how libcontainer sets up a container's rootfs
func chroot(path string) (err error) {
// if the engine is running in a user namespace we need to use actual chroot
if rsystem.RunningInUserNS() {
return realChroot(path)
}
if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
}
// Make everything in new ns slave.
// Don't use `private` here, as this could race: the mount ns could get a
// reference to a mount while an unmount from the host does not propagate,
// which could potentially cause transient errors for other operations.
// Even though the window here should be relatively small, `slave` should
// not cause any problems.
if err := mount.MakeRSlave("/"); err != nil {
return err
}
if mounted, _ := mountinfo.Mounted(path); !mounted {
if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil {
return realChroot(path)
}
}
// setup oldRoot for pivot_root
pivotDir, err := ioutil.TempDir(path, ".pivot_root")
if err != nil {
return fmt.Errorf("Error setting up pivot dir: %v", err)
}
var mounted bool
defer func() {
if mounted {
// make sure pivotDir is not mounted before we try to remove it
if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil {
if err == nil {
err = errCleanup
}
return
}
}
errCleanup := os.Remove(pivotDir)
// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
// because we already cleaned it up on failed pivot_root
if errCleanup != nil && !os.IsNotExist(errCleanup) {
errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
if err == nil {
err = errCleanup
}
}
}()
if err := unix.PivotRoot(path, pivotDir); err != nil {
// If pivot fails, fall back to the normal chroot after cleaning up temp dir
if err := os.Remove(pivotDir); err != nil {
return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
}
return realChroot(path)
}
mounted = true
// This is the new path for where the old root (prior to the pivot) has been moved to
// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
pivotDir = filepath.Join("/", filepath.Base(pivotDir))
if err := unix.Chdir("/"); err != nil {
return fmt.Errorf("Error changing to new root: %v", err)
}
// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
return fmt.Errorf("Error making old root private after pivot: %v", err)
}
// Now unmount the old root so it's no longer visible from the new root
if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil {
return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
}
mounted = false
return nil
}
func realChroot(path string) error {
if err := unix.Chroot(path); err != nil {
return fmt.Errorf("Error after fallback to chroot: %v", err)
}
if err := unix.Chdir("/"); err != nil {
return fmt.Errorf("Error changing to new root after chroot: %v", err)
}
return nil
}


@@ -0,0 +1,16 @@
// +build !windows,!linux
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import "golang.org/x/sys/unix"
func chroot(path string) error {
if err := unix.Chroot(path); err != nil {
return err
}
return unix.Chdir("/")
}
func realChroot(path string) error {
return chroot(path)
}


@@ -0,0 +1,23 @@
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import (
"io"
"github.com/docker/docker/pkg/archive"
)
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (size int64, err error) {
return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, options, false)
}
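ApplyLayer is the entry point an image extractor calls once per layer. A minimal usage sketch (the layer path and destination are assumptions for the example):

```go
package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/chrootarchive"
)

func main() {
	layer, err := os.Open("layer.tar") // example path, not from the commit
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	// ApplyLayer chroots to the destination in a re-exec'd child and applies
	// the diff, returning the unpacked size in bytes.
	size, err := chrootarchive.ApplyLayer("/tmp/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied layer: %d bytes", size)
}
```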


@@ -0,0 +1,130 @@
//+build !windows
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
"github.com/docker/docker/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
)
type applyLayerResponse struct {
LayerSize int64 `json:"layerSize"`
}
// applyLayer is the entry-point for docker-applylayer on re-exec. This is not
// used on Windows as it does not support chroot, hence no point sandboxing
// through chroot and reexec.
func applyLayer() {
var (
tmpDir string
err error
options *archive.TarOptions
)
runtime.LockOSThread()
flag.Parse()
inUserns := rsystem.RunningInUserNS()
if err := chroot(flag.Arg(0)); err != nil {
fatal(err)
}
// We need to be able to set any perms
oldmask, err := system.Umask(0)
defer system.Umask(oldmask)
if err != nil {
fatal(err)
}
if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
fatal(err)
}
if inUserns {
options.InUserNS = true
}
if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
fatal(err)
}
os.Setenv("TMPDIR", tmpDir)
size, err := archive.UnpackLayer("/", os.Stdin, options)
os.RemoveAll(tmpDir)
if err != nil {
fatal(err)
}
encoder := json.NewEncoder(os.Stdout)
if err := encoder.Encode(applyLayerResponse{size}); err != nil {
fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
}
if _, err := flush(os.Stdin); err != nil {
fatal(err)
}
os.Exit(0)
}
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
dest = filepath.Clean(dest)
if decompress {
decompressed, err := archive.DecompressStream(layer)
if err != nil {
return 0, err
}
defer decompressed.Close()
layer = decompressed
}
if options == nil {
options = &archive.TarOptions{}
if rsystem.RunningInUserNS() {
options.InUserNS = true
}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
data, err := json.Marshal(options)
if err != nil {
return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
}
cmd := reexec.Command("docker-applyLayer", dest)
cmd.Stdin = layer
cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
cmd.Stdout, cmd.Stderr = outBuf, errBuf
if err = cmd.Run(); err != nil {
return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
}
// Stdout should be a valid JSON struct representing an applyLayerResponse.
response := applyLayerResponse{}
decoder := json.NewDecoder(outBuf)
if err = decoder.Decode(&response); err != nil {
return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
}
return response.LayerSize, nil
}


@@ -0,0 +1,45 @@
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/longpath"
)
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
dest = filepath.Clean(dest)
// Ensure it is a Windows-style volume path
dest = longpath.AddPrefix(dest)
if decompress {
decompressed, err := archive.DecompressStream(layer)
if err != nil {
return 0, err
}
defer decompressed.Close()
layer = decompressed
}
tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract")
if err != nil {
return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err)
}
s, err := archive.UnpackLayer(dest, layer, nil)
os.RemoveAll(tmpDir)
if err != nil {
return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err)
}
return s, nil
}


@@ -0,0 +1,29 @@
// +build !windows
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
import (
"fmt"
"io"
"io/ioutil"
"os"
"github.com/docker/docker/pkg/reexec"
)
func init() {
reexec.Register("docker-applyLayer", applyLayer)
reexec.Register("docker-untar", untar)
reexec.Register("docker-tar", tar)
}
func fatal(err error) {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
// flush consumes all the bytes from the reader, returning the number of
// bytes read and any error encountered
func flush(r io.Reader) (bytes int64, err error) {
return io.Copy(ioutil.Discard, r)
}


@@ -0,0 +1,4 @@
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
func init() {
}

vendor/github.com/docker/docker/pkg/locker/README.md generated vendored Normal file

@@ -0,0 +1,65 @@
Locker
======
locker provides a mechanism for creating finer-grained locking to help
free up more global locks to handle other tasks.
The implementation is similar to a sync.Mutex; however, the user must provide a
reference name for the underlying lock when locking and unlocking, and Unlock
may return an error.
If a lock with a given name does not exist when `Lock` is called, one is
created.
Lock references are automatically cleaned up on `Unlock` if nothing else is
waiting for the lock.
## Usage
```go
package important
import (
"sync"
"time"
"github.com/docker/docker/pkg/locker"
)
type important struct {
locks *locker.Locker
data map[string]interface{}
mu sync.Mutex
}
func (i *important) Get(name string) interface{} {
i.locks.Lock(name)
defer i.locks.Unlock(name)
return i.data[name]
}
func (i *important) Create(name string, data interface{}) {
i.locks.Lock(name)
defer i.locks.Unlock(name)
i.createImportant(data)
i.mu.Lock()
i.data[name] = data
i.mu.Unlock()
}
func (i *important) createImportant(data interface{}) {
time.Sleep(10 * time.Second)
}
```
For functions dealing with a given name, always lock at the beginning of the
function (or before doing anything with the underlying state); this ensures that
any other function dealing with the same name will block.

When you need to modify the underlying data, use the global lock to ensure
nothing else is modifying it at the same time. Since the name lock is already in
place, no reads will occur while the modification is being performed.

vendor/github.com/docker/docker/pkg/locker/locker.go generated vendored Normal file

@@ -0,0 +1,112 @@
/*
Package locker provides a mechanism for creating finer-grained locking to help
free up more global locks to handle other tasks.
The implementation is similar to a sync.Mutex; however, the user must provide a
reference name for the underlying lock when locking and unlocking, and Unlock
may return an error.
If a lock with a given name does not exist when `Lock` is called, one is
created.
Lock references are automatically cleaned up on `Unlock` if nothing else is
waiting for the lock.
*/
package locker // import "github.com/docker/docker/pkg/locker"
import (
"errors"
"sync"
"sync/atomic"
)
// ErrNoSuchLock is returned when the requested lock does not exist
var ErrNoSuchLock = errors.New("no such lock")
// Locker provides a locking mechanism based on the passed in reference name
type Locker struct {
mu sync.Mutex
locks map[string]*lockCtr
}
// lockCtr is used by Locker to represent a lock with a given name.
type lockCtr struct {
mu sync.Mutex
// waiters is the number of waiters waiting to acquire the lock
// this is int32 instead of uint32 so we can add `-1` in `dec()`
waiters int32
}
// inc increments the number of waiters waiting for the lock
func (l *lockCtr) inc() {
atomic.AddInt32(&l.waiters, 1)
}
// dec decrements the number of waiters waiting on the lock
func (l *lockCtr) dec() {
atomic.AddInt32(&l.waiters, -1)
}
// count gets the current number of waiters
func (l *lockCtr) count() int32 {
return atomic.LoadInt32(&l.waiters)
}
// Lock locks the mutex
func (l *lockCtr) Lock() {
l.mu.Lock()
}
// Unlock unlocks the mutex
func (l *lockCtr) Unlock() {
l.mu.Unlock()
}
// New creates a new Locker
func New() *Locker {
return &Locker{
locks: make(map[string]*lockCtr),
}
}
// Lock locks a mutex with the given name. If it doesn't exist, one is created
func (l *Locker) Lock(name string) {
l.mu.Lock()
if l.locks == nil {
l.locks = make(map[string]*lockCtr)
}
nameLock, exists := l.locks[name]
if !exists {
nameLock = &lockCtr{}
l.locks[name] = nameLock
}
// increment the nameLock waiters while inside the main mutex
// this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently
nameLock.inc()
l.mu.Unlock()
// Lock the nameLock outside the main mutex so we don't block other operations
// once locked then we can decrement the number of waiters for this lock
nameLock.Lock()
nameLock.dec()
}
// Unlock unlocks the mutex with the given name
// If the given lock is not being waited on by any other callers, it is deleted
func (l *Locker) Unlock(name string) error {
l.mu.Lock()
nameLock, exists := l.locks[name]
if !exists {
l.mu.Unlock()
return ErrNoSuchLock
}
if nameLock.count() == 0 {
delete(l.locks, name)
}
nameLock.Unlock()
l.mu.Unlock()
return nil
}

vendor/github.com/docker/docker/pkg/reexec/README.md generated vendored Normal file

@@ -0,0 +1,5 @@
# reexec
The `reexec` package facilitates the busybox-style re-exec of the docker binary that we
require because of the forking limitations of Go. Handlers can be registered under a
name, and argv[0] of the re-exec'd binary is used to find and execute the matching
custom init handler.
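A minimal sketch of that registration flow, using only the Register, Init, and Command functions defined by this package:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Runs when the binary is re-exec'd with argv[0] == "my-init".
	reexec.Register("my-init", func() {
		fmt.Println("hello from the re-exec'd child")
		os.Exit(0)
	})
}

func main() {
	// In the child, Init finds the initializer by argv[0] and runs it.
	if reexec.Init() {
		return
	}
	cmd := reexec.Command("my-init")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```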


@@ -0,0 +1,28 @@
package reexec // import "github.com/docker/docker/pkg/reexec"
import (
"os/exec"
"syscall"
"golang.org/x/sys/unix"
)
// Self returns the path to the current process's binary.
// Returns "/proc/self/exe".
func Self() string {
return "/proc/self/exe"
}
// Command returns *exec.Cmd which has Path as current binary. It also sets
// SysProcAttr.Pdeathsig to SIGTERM.
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func Command(args ...string) *exec.Cmd {
return &exec.Cmd{
Path: Self(),
Args: args,
SysProcAttr: &syscall.SysProcAttr{
Pdeathsig: unix.SIGTERM,
},
}
}


@@ -0,0 +1,23 @@
// +build freebsd darwin
package reexec // import "github.com/docker/docker/pkg/reexec"
import (
"os/exec"
)
// Self returns the path to the current process's binary.
// Uses os.Args[0].
func Self() string {
return naiveSelf()
}
// Command returns *exec.Cmd which has Path as current binary.
// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
// be set to "/usr/bin/docker".
func Command(args ...string) *exec.Cmd {
return &exec.Cmd{
Path: Self(),
Args: args,
}
}


@@ -0,0 +1,16 @@
// +build !linux,!windows,!freebsd,!darwin
package reexec // import "github.com/docker/docker/pkg/reexec"
import (
"os/exec"
)
// Self is unsupported on operating systems apart from Linux, Windows, FreeBSD, and Darwin.
func Self() string {
return ""
}
// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, and Darwin.
func Command(args ...string) *exec.Cmd {
return nil
}


@@ -0,0 +1,21 @@
package reexec // import "github.com/docker/docker/pkg/reexec"
import (
"os/exec"
)
// Self returns the path to the current process's binary.
// Uses os.Args[0].
func Self() string {
return naiveSelf()
}
// Command returns *exec.Cmd which has Path as current binary.
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
// be set to "C:\docker.exe".
func Command(args ...string) *exec.Cmd {
return &exec.Cmd{
Path: Self(),
Args: args,
}
}

vendor/github.com/docker/docker/pkg/reexec/reexec.go generated vendored Normal file

@@ -0,0 +1,47 @@
package reexec // import "github.com/docker/docker/pkg/reexec"
import (
"fmt"
"os"
"os/exec"
"path/filepath"
)
var registeredInitializers = make(map[string]func())
// Register adds an initialization func under the specified name
func Register(name string, initializer func()) {
if _, exists := registeredInitializers[name]; exists {
panic(fmt.Sprintf("reexec func already registered under name %q", name))
}
registeredInitializers[name] = initializer
}
// Init is called as the first part of the exec process and returns true if an
// initialization function was called.
func Init() bool {
initializer, exists := registeredInitializers[os.Args[0]]
if exists {
initializer()
return true
}
return false
}
func naiveSelf() string {
name := os.Args[0]
if filepath.Base(name) == name {
if lp, err := exec.LookPath(name); err == nil {
return lp
}
}
// handle conversion of relative paths to absolute
if absName, err := filepath.Abs(name); err == nil {
return absName
}
// if we couldn't get absolute name, return original
// (NOTE: Go only errors on Abs() if os.Getwd fails)
return name
}

vendor/github.com/docker/go-events/.gitignore generated vendored Normal file

@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

vendor/github.com/docker/go-events/CONTRIBUTING.md generated vendored Normal file

@@ -0,0 +1,70 @@
# Contributing to Docker open source projects
Want to hack on go-events? Awesome! Here are instructions to get you started.
go-events is part of the [Docker](https://www.docker.com) project, and
follows the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.
Otherwise, go read Docker's
[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
For an in-depth description of our contribution process, visit the
contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/)
### Sign your work
The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions).
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

vendor/github.com/docker/go-events/LICENSE generated vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

46
vendor/github.com/docker/go-events/MAINTAINERS generated vendored Normal file
View File

@@ -0,0 +1,46 @@
# go-events maintainers file
#
# This file describes who runs the docker/go-events project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
[Org."Core maintainers"]
people = [
"aaronlehmann",
"aluzzardi",
"lk4d4",
"stevvooe",
]
[people]
# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.aaronlehmann]
Name = "Aaron Lehmann"
Email = "aaron.lehmann@docker.com"
GitHub = "aaronlehmann"
[people.aluzzardi]
Name = "Andrea Luzzardi"
Email = "al@docker.com"
GitHub = "aluzzardi"
[people.lk4d4]
Name = "Alexander Morozov"
Email = "lk4d4@docker.com"
GitHub = "lk4d4"
[people.stevvooe]
Name = "Stephen Day"
Email = "stephen.day@docker.com"
GitHub = "stevvooe"

117
vendor/github.com/docker/go-events/README.md generated vendored Normal file
View File

@@ -0,0 +1,117 @@
# Docker Events Package
[![GoDoc](https://godoc.org/github.com/docker/go-events?status.svg)](https://godoc.org/github.com/docker/go-events)
[![Circle CI](https://circleci.com/gh/docker/go-events.svg?style=shield)](https://circleci.com/gh/docker/go-events)
The Docker `events` package implements composable event distribution for Go.
Originally created to implement the [notifications in Docker Registry
2](https://github.com/docker/distribution/blob/master/docs/notifications.md),
we've found the pattern useful in other applications. This package carries
most of the same code with slightly updated interfaces, and much of the
internal machinery is now exported.
## Usage
The `events` package centers around a `Sink` type. Events are written with
calls to `Sink.Write(event Event)`. Sinks can be wired up in various
configurations to achieve interesting behavior.
The canonical example is that employed by the
[docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications)
package. Let's say we have a type `httpSink` to which we'd like to queue
notifications. As a rule, it should send a single HTTP request and return an
error if that request fails:
```go
func (h *httpSink) Write(event Event) error {
p, err := json.Marshal(event)
if err != nil {
return err
}
body := bytes.NewReader(p)
resp, err := h.client.Post(h.url, "application/json", body)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return errors.New("unexpected status")
}
return nil
}
// implement (*httpSink).Close()
```
With just that, we can start using components from this package. One can call
`(*httpSink).Write` to send events as the body of a POST request to a
configured URL.
### Retries
HTTP can be unreliable. The first feature we'd like is some retry logic:
```go
hs := newHTTPSink(/*...*/)
retry := NewRetryingSink(hs, NewBreaker(5, time.Second))
```
We now have a sink that will retry events against the `httpSink` until they
succeed. The retrying sink will back off for one second after 5 consecutive
failures, using the breaker strategy.
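If a fixed breaker is too coarse, the package's `ExponentialBackoff` strategy can be swapped in instead; a minimal sketch, reusing the hypothetical `newHTTPSink` from above:

```go
hs := newHTTPSink(/*...*/)
retry := NewRetryingSink(hs, NewExponentialBackoff(DefaultExponentialBackoffConfig))
```

With the default configuration, the backoff upper bound grows exponentially with consecutive failures, capped at 20 seconds, and the actual wait is drawn uniformly from [0, bound).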
### Queues
This isn't quite enough. We want a sink that doesn't block while we are
waiting for events to be sent. Let's add a `Queue`:
```go
queue := NewQueue(retry)
```
Now, we have an unbounded queue that will work through all events sent with
`(*Queue).Write`. Events can be added asynchronously to the queue without
blocking the current execution path. This is ideal for use in an HTTP request handler.
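As a sketch (the handler path and event payload here are illustrative, not part of the package):

```go
http.HandleFunc("/resource", func(w http.ResponseWriter, r *http.Request) {
	// ... handle the request ...

	// Non-blocking: the queue buffers the event for asynchronous delivery.
	if err := queue.Write("resource accessed"); err != nil {
		// ErrSinkClosed is the only error a queue write returns.
		log.Printf("event dropped: %v", err)
	}
})
```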
### Broadcast
It usually turns out that you want to send to more than one listener. We can
use `Broadcaster` to support this:
```go
var broadcast = NewBroadcaster() // make it available somewhere in your application.
broadcast.Add(queue) // add your queue!
broadcast.Add(queue2) // and another!
```
With the above, we can now call `broadcast.Write` in our HTTP handlers and have
all the events distributed to each queue. Because the events are queued, no
listener blocks another.
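A sketch of the write path and shutdown (the event payload is illustrative):

```go
// In a handler, a single write fans out to every registered queue.
_ = broadcast.Write("something happened")

// On shutdown, Close flushes pending events to the sinks before returning.
_ = broadcast.Close()
```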
### Extending
For the most part, the above is sufficient for a lot of applications. However,
the functionality can be extended by implementing your own `Sink`. The
behavior and semantics of the sink can be completely dependent on the
application requirements. The interface is provided below for reference:
```go
type Sink interface {
Write(Event) error
Close() error
}
```
Application behavior can be controlled by how `Write` behaves. The examples
above are designed to queue the message and return as quickly as possible.
Other implementations may block until the event is committed to durable
storage.
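For illustration, a minimal custom sink might serialize events to an `io.Writer`; the `writerSink` name and behavior below are a sketch, not part of the package, and like `Filter` it is not goroutine safe:

```go
// writerSink prints each event to the wrapped writer, one per line.
type writerSink struct {
	w      io.Writer
	closed bool
}

func (s *writerSink) Write(event Event) error {
	if s.closed {
		return ErrSinkClosed
	}
	_, err := fmt.Fprintln(s.w, event)
	return err
}

// Close marks the sink closed; subsequent writes return ErrSinkClosed.
func (s *writerSink) Close() error {
	s.closed = true
	return nil
}
```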
## Copyright and license
Copyright © 2016 Docker, Inc. go-events is licensed under the Apache License,
Version 2.0. See [LICENSE](LICENSE) for the full license text.

178
vendor/github.com/docker/go-events/broadcast.go generated vendored Normal file
View File

@@ -0,0 +1,178 @@
package events
import (
"fmt"
"sync"
"github.com/sirupsen/logrus"
)
// Broadcaster sends events to multiple, reliable Sinks. The goal of this
// component is to dispatch events to configured endpoints. Reliability can be
// provided by wrapping incoming sinks.
type Broadcaster struct {
sinks []Sink
events chan Event
adds chan configureRequest
removes chan configureRequest
shutdown chan struct{}
closed chan struct{}
once sync.Once
}
// NewBroadcaster returns a broadcaster dispatching events to the provided
// sinks. The broadcaster's behavior will be affected by the properties of each
// sink. Generally, a sink should accept all messages and deal with reliability
// on its own; wrapping sinks in a Queue or RetryingSink is appropriate here.
func NewBroadcaster(sinks ...Sink) *Broadcaster {
b := Broadcaster{
sinks: sinks,
events: make(chan Event),
adds: make(chan configureRequest),
removes: make(chan configureRequest),
shutdown: make(chan struct{}),
closed: make(chan struct{}),
}
// Start the broadcaster
go b.run()
return &b
}
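// Usage sketch (illustrative, not part of the original source): a broadcaster
// is typically constructed once with reliable sinks and shared:
//
//	b := NewBroadcaster(NewQueue(sink1), NewQueue(sink2))
//	_ = b.Write(event) // dispatched to both queues
//	_ = b.Close()      // flushes sinks and stops the run loop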
// Write accepts an event to be dispatched to all sinks. It fails only if the
// broadcaster has been closed, and should never block (hopefully!). The caller
// cedes the memory to the broadcaster and should not modify it after calling
// Write.
func (b *Broadcaster) Write(event Event) error {
select {
case b.events <- event:
case <-b.closed:
return ErrSinkClosed
}
return nil
}
// Add the sink to the broadcaster.
//
// The provided sink must be comparable with equality. Typically, this just
// works with a regular pointer type.
func (b *Broadcaster) Add(sink Sink) error {
return b.configure(b.adds, sink)
}
// Remove the provided sink.
func (b *Broadcaster) Remove(sink Sink) error {
return b.configure(b.removes, sink)
}
type configureRequest struct {
sink Sink
response chan error
}
func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error {
response := make(chan error, 1)
for {
select {
case ch <- configureRequest{
sink: sink,
response: response}:
ch = nil // request sent; disable this case and wait only for the response
case err := <-response:
return err
case <-b.closed:
return ErrSinkClosed
}
}
}
// Close the broadcaster, ensuring that all messages are flushed to the
// underlying sink before returning.
func (b *Broadcaster) Close() error {
b.once.Do(func() {
close(b.shutdown)
})
<-b.closed
return nil
}
// run is the main broadcast loop, started when the broadcaster is created.
// Under normal conditions, it waits for events on the event channel. After
// Close is called, this goroutine will exit.
func (b *Broadcaster) run() {
defer close(b.closed)
remove := func(target Sink) {
for i, sink := range b.sinks {
if sink == target {
b.sinks = append(b.sinks[:i], b.sinks[i+1:]...)
break
}
}
}
for {
select {
case event := <-b.events:
for _, sink := range b.sinks {
if err := sink.Write(event); err != nil {
if err == ErrSinkClosed {
// remove closed sinks
remove(sink)
continue
}
logrus.WithField("event", event).WithField("events.sink", sink).WithError(err).
Errorf("broadcaster: dropping event")
}
}
case request := <-b.adds:
// although add/remove require iteration, the common send path
// iterates over a slice, which is faster than a map.
var found bool
for _, sink := range b.sinks {
if request.sink == sink {
found = true
break
}
}
if !found {
b.sinks = append(b.sinks, request.sink)
}
request.response <- nil
case request := <-b.removes:
remove(request.sink)
request.response <- nil
case <-b.shutdown:
// close all the underlying sinks
for _, sink := range b.sinks {
if err := sink.Close(); err != nil && err != ErrSinkClosed {
logrus.WithField("events.sink", sink).WithError(err).
Errorf("broadcaster: closing sink failed")
}
}
return
}
}
}
func (b *Broadcaster) String() string {
// Serialize copy of this broadcaster without the sync.Once, to avoid
// a data race.
b2 := map[string]interface{}{
"sinks": b.sinks,
"events": b.events,
"adds": b.adds,
"removes": b.removes,
"shutdown": b.shutdown,
"closed": b.closed,
}
return fmt.Sprint(b2)
}

61
vendor/github.com/docker/go-events/channel.go generated vendored Normal file
View File

@@ -0,0 +1,61 @@
package events
import (
"fmt"
"sync"
)
// Channel provides a sink that can be listened on. The writer and channel
// listener must operate in separate goroutines.
//
// Consumers should listen on Channel.C until Closed is closed.
type Channel struct {
C chan Event
closed chan struct{}
once sync.Once
}
// NewChannel returns a channel. If buffer is zero, the channel is
// unbuffered.
func NewChannel(buffer int) *Channel {
return &Channel{
C: make(chan Event, buffer),
closed: make(chan struct{}),
}
}
// Done returns a channel on which receives always proceed once the sink is closed.
func (ch *Channel) Done() chan struct{} {
return ch.closed
}
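// Consumption sketch (illustrative): a listener selects on C and Done from
// its own goroutine:
//
//	go func() {
//		for {
//			select {
//			case ev := <-ch.C:
//				handle(ev) // hypothetical handler
//			case <-ch.Done():
//				return
//			}
//		}
//	}()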
// Write the event to the channel. Must be called in a separate goroutine from
// the listener.
func (ch *Channel) Write(event Event) error {
select {
case ch.C <- event:
return nil
case <-ch.closed:
return ErrSinkClosed
}
}
// Close the channel sink.
func (ch *Channel) Close() error {
ch.once.Do(func() {
close(ch.closed)
})
return nil
}
func (ch *Channel) String() string {
// Serialize a copy of the Channel that doesn't contain the sync.Once,
// to avoid a data race.
ch2 := map[string]interface{}{
"C": ch.C,
"closed": ch.closed,
}
return fmt.Sprint(ch2)
}

10
vendor/github.com/docker/go-events/errors.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
package events
import "fmt"
var (
// ErrSinkClosed is returned if a write is issued to a sink that has been
// closed. If encountered, the error should be considered terminal and
// retries will not be successful.
ErrSinkClosed = fmt.Errorf("events: sink closed")
)

15
vendor/github.com/docker/go-events/event.go generated vendored Normal file
View File

@@ -0,0 +1,15 @@
package events
// Event marks items that can be sent as events.
type Event interface{}
// Sink accepts and sends events.
type Sink interface {
// Write an event to the Sink. If no error is returned, the caller will
// assume that all events have been committed to the sink. If an error is
// received, the caller may retry sending the event.
Write(event Event) error
// Close the sink, possibly waiting for pending events to flush.
Close() error
}

52
vendor/github.com/docker/go-events/filter.go generated vendored Normal file
View File

@@ -0,0 +1,52 @@
package events
// Matcher matches events.
type Matcher interface {
Match(event Event) bool
}
// MatcherFunc implements matcher with just a function.
type MatcherFunc func(event Event) bool
// Match calls the wrapped function.
func (fn MatcherFunc) Match(event Event) bool {
return fn(event)
}
// Filter provides an event sink that sends only events that are accepted by a
// Matcher. No methods on filter are goroutine safe.
type Filter struct {
dst Sink
matcher Matcher
closed bool
}
// NewFilter returns a new filter that sends to dst only those events for
// which matcher returns true.
func NewFilter(dst Sink, matcher Matcher) Sink {
return &Filter{dst: dst, matcher: matcher}
}
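// Usage sketch (illustrative): select events by concrete type with a
// MatcherFunc:
//
//	f := NewFilter(dst, MatcherFunc(func(event Event) bool {
//		_, ok := event.(myEvent) // hypothetical concrete event type
//		return ok
//	}))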
// Write an event to the filter.
func (f *Filter) Write(event Event) error {
if f.closed {
return ErrSinkClosed
}
if f.matcher.Match(event) {
return f.dst.Write(event)
}
return nil
}
// Close the filter and allow no more events to pass through.
func (f *Filter) Close() error {
// TODO(stevvooe): Not all sinks should have Close.
if f.closed {
return nil
}
f.closed = true
return f.dst.Close()
}

111
vendor/github.com/docker/go-events/queue.go generated vendored Normal file
View File

@@ -0,0 +1,111 @@
package events
import (
"container/list"
"sync"
"github.com/sirupsen/logrus"
)
// Queue accepts all messages into a queue for asynchronous consumption
// by a sink. It is unbounded and thread safe but the sink must be reliable or
// events will be dropped.
type Queue struct {
dst Sink
events *list.List
cond *sync.Cond
mu sync.Mutex
closed bool
}
// NewQueue returns a queue to the provided Sink dst.
func NewQueue(dst Sink) *Queue {
eq := Queue{
dst: dst,
events: list.New(),
}
eq.cond = sync.NewCond(&eq.mu)
go eq.run()
return &eq
}
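// Usage sketch (illustrative): wrap a retrying sink so that writers never
// block on delivery:
//
//	q := NewQueue(NewRetryingSink(dst, NewBreaker(5, time.Second)))
//	defer q.Close()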
// Write accepts the events into the queue, only failing if the queue has
// been closed.
func (eq *Queue) Write(event Event) error {
eq.mu.Lock()
defer eq.mu.Unlock()
if eq.closed {
return ErrSinkClosed
}
eq.events.PushBack(event)
eq.cond.Signal() // signal waiters
return nil
}
// Close shuts down the event queue, flushing any remaining events to the sink.
func (eq *Queue) Close() error {
eq.mu.Lock()
defer eq.mu.Unlock()
if eq.closed {
return nil
}
// set closed flag
eq.closed = true
eq.cond.Signal() // signal flushes queue
eq.cond.Wait() // wait for signal from last flush
return eq.dst.Close()
}
// run is the main goroutine to flush events to the target sink.
func (eq *Queue) run() {
for {
event := eq.next()
if event == nil {
return // nil means the event queue is closed.
}
if err := eq.dst.Write(event); err != nil {
// TODO(aaronl): Dropping events could be bad depending
// on the application. We should have a way of
// communicating this condition. However, logging
// at a log level above debug may not be appropriate.
// Eventually, go-events should not use logrus at all,
// and should bubble up conditions like this through
// error values.
logrus.WithFields(logrus.Fields{
"event": event,
"sink": eq.dst,
}).WithError(err).Debug("eventqueue: dropped event")
}
}
}
// next encompasses the critical section of the run loop. When the queue is
// empty, it will block on the condition. If new data arrives, it will wake
// and return an event. When closed, nil will be returned.
func (eq *Queue) next() Event {
eq.mu.Lock()
defer eq.mu.Unlock()
for eq.events.Len() < 1 {
if eq.closed {
eq.cond.Broadcast()
return nil
}
eq.cond.Wait()
}
front := eq.events.Front()
block := front.Value.(Event)
eq.events.Remove(front)
return block
}

260
vendor/github.com/docker/go-events/retry.go generated vendored Normal file
View File

@@ -0,0 +1,260 @@
package events
import (
"fmt"
"math/rand"
"sync"
"sync/atomic"
"time"
"github.com/sirupsen/logrus"
)
// RetryingSink retries a write until it succeeds or ErrSinkClosed is
// returned. The underlying sink must succeed with probability p > 0, or writes
// will block. Retry behavior is configured with a RetryStrategy. Concurrent
// calls to a retrying sink are serialized through the sink, meaning that if
// one is in-flight, another will not proceed.
type RetryingSink struct {
sink Sink
strategy RetryStrategy
closed chan struct{}
once sync.Once
}
// NewRetryingSink returns a sink that will retry writes to sink, backing
// off on failure according to the provided RetryStrategy.
func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink {
rs := &RetryingSink{
sink: sink,
strategy: strategy,
closed: make(chan struct{}),
}
return rs
}
// Write attempts to flush the events to the downstream sink until it succeeds
// or the sink is closed.
func (rs *RetryingSink) Write(event Event) error {
logger := logrus.WithField("event", event)
retry:
select {
case <-rs.closed:
return ErrSinkClosed
default:
}
if backoff := rs.strategy.Proceed(event); backoff > 0 {
select {
case <-time.After(backoff):
// TODO(stevvooe): This branch holds up the next try. Before, we
// would simply break to the "retry" label and then possibly wait
// again. However, this requires all retry strategies to have a
// large probability of probing the sink for success, rather than
// just backing off and sending the request.
case <-rs.closed:
return ErrSinkClosed
}
}
if err := rs.sink.Write(event); err != nil {
if err == ErrSinkClosed {
// terminal!
return err
}
logger := logger.WithError(err) // shadow!!
if rs.strategy.Failure(event, err) {
logger.Errorf("retryingsink: dropped event")
return nil
}
logger.Errorf("retryingsink: error writing event, retrying")
goto retry
}
rs.strategy.Success(event)
return nil
}
// Close closes the sink and the underlying sink.
func (rs *RetryingSink) Close() error {
rs.once.Do(func() {
close(rs.closed)
})
return nil
}
func (rs *RetryingSink) String() string {
// Serialize a copy of the RetryingSink without the sync.Once, to avoid
// a data race.
rs2 := map[string]interface{}{
"sink": rs.sink,
"strategy": rs.strategy,
"closed": rs.closed,
}
return fmt.Sprint(rs2)
}
// RetryStrategy defines a strategy for retrying event sink writes.
//
// All methods should be goroutine safe.
type RetryStrategy interface {
// Proceed is called before every event send. If proceed returns a
// positive, non-zero duration, the retryer will back off by that amount
// before sending.
//
// An event is provided, but may be ignored.
Proceed(event Event) time.Duration
// Failure reports a failure to the strategy. If this method returns true,
// the event should be dropped.
Failure(event Event, err error) bool
// Success should be called when an event is sent successfully.
Success(event Event)
}
// Breaker implements a circuit breaker retry strategy.
//
// The current implementation never drops events.
type Breaker struct {
threshold int
recent int
last time.Time
backoff time.Duration // time after which we retry after failure.
mu sync.Mutex
}
var _ RetryStrategy = &Breaker{}
// NewBreaker returns a breaker that will back off after the threshold has been
// tripped. A Breaker is thread safe and may be shared by many goroutines.
func NewBreaker(threshold int, backoff time.Duration) *Breaker {
return &Breaker{
threshold: threshold,
backoff: backoff,
}
}
// Proceed checks the failures against the threshold.
func (b *Breaker) Proceed(event Event) time.Duration {
b.mu.Lock()
defer b.mu.Unlock()
if b.recent < b.threshold {
return 0
}
return time.Until(b.last.Add(b.backoff))
}
// Success resets the breaker.
func (b *Breaker) Success(event Event) {
b.mu.Lock()
defer b.mu.Unlock()
b.recent = 0
b.last = time.Time{}
}
// Failure records the failure and latest failure time.
func (b *Breaker) Failure(event Event, err error) bool {
b.mu.Lock()
defer b.mu.Unlock()
b.recent++
b.last = time.Now().UTC()
return false // never drop events.
}
var (
// DefaultExponentialBackoffConfig provides a default configuration for
// exponential backoff.
DefaultExponentialBackoffConfig = ExponentialBackoffConfig{
Base: time.Second,
Factor: time.Second,
Max: 20 * time.Second,
}
)
// ExponentialBackoffConfig configures backoff parameters.
//
// Note that these parameters operate on the upper bound for choosing a random
// value. For example, at Base=1s, a random value in [0,1s) will be chosen for
// the backoff value.
type ExponentialBackoffConfig struct {
// Base is the minimum bound for backing off after failure.
Base time.Duration
// Factor sets the amount of time by which the backoff grows with each
// failure.
Factor time.Duration
// Max is the absolute maximum bound for a single backoff.
Max time.Duration
}
// ExponentialBackoff implements random backoff with exponentially increasing
// bounds as the number of consecutive failures increases.
type ExponentialBackoff struct {
failures uint64 // consecutive failure counter (needs to be 64-bit aligned)
config ExponentialBackoffConfig
}
// NewExponentialBackoff returns an exponential backoff strategy with the
// desired config. If config is nil, the default is returned.
func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff {
return &ExponentialBackoff{
config: config,
}
}
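// Usage sketch (illustrative):
//
//	rs := NewRetryingSink(dst, NewExponentialBackoff(DefaultExponentialBackoffConfig))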
// Proceed returns the next randomly bound exponential backoff time.
func (b *ExponentialBackoff) Proceed(event Event) time.Duration {
return b.backoff(atomic.LoadUint64(&b.failures))
}
// Success resets the failures counter.
func (b *ExponentialBackoff) Success(event Event) {
atomic.StoreUint64(&b.failures, 0)
}
// Failure increments the failure counter.
func (b *ExponentialBackoff) Failure(event Event, err error) bool {
atomic.AddUint64(&b.failures, 1)
return false
}
// backoff calculates the amount of time to wait based on the number of
// consecutive failures.
func (b *ExponentialBackoff) backoff(failures uint64) time.Duration {
if failures <= 0 {
// proceed normally when there are no failures.
return 0
}
factor := b.config.Factor
if factor <= 0 {
factor = DefaultExponentialBackoffConfig.Factor
}
backoff := b.config.Base + factor*time.Duration(1<<(failures-1))
max := b.config.Max
if max <= 0 {
max = DefaultExponentialBackoffConfig.Max
}
if backoff > max || backoff < 0 {
backoff = max
}
// Choose a uniformly distributed value from [0, backoff).
return time.Duration(rand.Int63n(int64(backoff)))
}