mirror of
https://github.com/containers/skopeo.git
synced 2025-09-01 14:47:10 +00:00
Bump github.com/containers/image/v5 from 5.3.1 to 5.4.0
Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.3.1 to 5.4.0. - [Release notes](https://github.com/containers/image/releases) - [Commits](https://github.com/containers/image/compare/v5.3.1...v5.4.0) Signed-off-by: dependabot-preview[bot] <support@dependabot.com> Signed-off-by: Miloslav Trmač <mitr@redhat.com>
This commit is contained in:
committed by
Miloslav Trmač
parent
bd20786c38
commit
325327dc3f
2
vendor/github.com/containers/storage/VERSION
generated
vendored
2
vendor/github.com/containers/storage/VERSION
generated
vendored
@@ -1 +1 @@
|
||||
1.16.6
|
||||
1.18.1
|
||||
|
4
vendor/github.com/containers/storage/go.mod
generated
vendored
4
vendor/github.com/containers/storage/go.mod
generated
vendored
@@ -5,12 +5,14 @@ require (
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
|
||||
github.com/Microsoft/hcsshim v0.8.7
|
||||
github.com/docker/go-units v0.4.0
|
||||
github.com/hashicorp/go-multierror v1.0.0
|
||||
github.com/klauspost/compress v1.10.3
|
||||
github.com/klauspost/pgzip v1.2.3
|
||||
github.com/mattn/go-shellwords v1.0.10
|
||||
github.com/mistifyio/go-zfs v2.1.1+incompatible
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1
|
||||
github.com/opencontainers/runc v1.0.0-rc9
|
||||
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
|
||||
github.com/opencontainers/selinux v1.4.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
|
||||
@@ -20,7 +22,7 @@ require (
|
||||
github.com/tchap/go-patricia v2.3.0+incompatible
|
||||
github.com/vbatts/tar-split v0.11.1
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7
|
||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777
|
||||
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2
|
||||
gotest.tools v2.2.0+incompatible
|
||||
)
|
||||
|
||||
|
36
vendor/github.com/containers/storage/go.sum
generated
vendored
36
vendor/github.com/containers/storage/go.sum
generated
vendored
@@ -1,6 +1,7 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||
github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
|
||||
@@ -16,12 +17,19 @@ github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv
|
||||
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
github.com/containers/common v0.5.0 h1:ZAef7h3oO46PcbTyfooZf8XLHrYad+GkhSu3EhH6P24=
|
||||
github.com/containers/common v0.5.0/go.mod h1:m62kenckrWi5rZx32kaLje2Og0hpf6NsaTBn6+b+Oys=
|
||||
github.com/containers/common v0.6.1 h1:z9VeVXYeOnNV99uNLp7zoE5KO1n0hqz1mdm5a6AiIrA=
|
||||
github.com/containers/common v0.6.1/go.mod h1:m62kenckrWi5rZx32kaLje2Og0hpf6NsaTBn6+b+Oys=
|
||||
github.com/containers/storage v1.16.0/go.mod h1:nqN09JSi1/RSI1UAUwDYXPRiGSlq5FPbNkN/xb0TfG0=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
@@ -32,15 +40,22 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
|
||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
|
||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0=
|
||||
github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8=
|
||||
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
|
||||
github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/klauspost/pgzip v1.2.2 h1:8d4I0LDiieuGngsqlqOih9ker/NS0LX4V0i+EhiFWg0=
|
||||
@@ -53,14 +68,23 @@ github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvO
|
||||
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
||||
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
|
||||
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
|
||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
|
||||
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI=
|
||||
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 h1:Dliu5QO+4JYWu/yMshaMU7G3JN2POGpwjJN7gjy10Go=
|
||||
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
|
||||
github.com/opencontainers/selinux v1.3.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
|
||||
github.com/opencontainers/selinux v1.3.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
|
||||
github.com/opencontainers/selinux v1.4.0 h1:cpiX/2wWIju/6My60T6/z9CxNG7c8xTQyEmA9fChpUo=
|
||||
github.com/opencontainers/selinux v1.4.0/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
@@ -77,10 +101,12 @@ github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
|
||||
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
@@ -103,6 +129,7 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
@@ -115,6 +142,7 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -123,6 +151,9 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777 h1:wejkGHRTr38uaKRqECZlsCsJ1/TGxIyFbH32x5zUdu4=
|
||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2 h1:/J2nHFg1MTqaRLFO7M+J78ASNsJoz3r0cvHBPQ77fsE=
|
||||
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -130,6 +161,7 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
@@ -138,8 +170,12 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
2
vendor/github.com/containers/storage/layers.go
generated
vendored
2
vendor/github.com/containers/storage/layers.go
generated
vendored
@@ -281,6 +281,8 @@ func copyLayer(l *Layer) *Layer {
|
||||
Flags: copyStringInterfaceMap(l.Flags),
|
||||
UIDMap: copyIDMap(l.UIDMap),
|
||||
GIDMap: copyIDMap(l.GIDMap),
|
||||
UIDs: copyUint32Slice(l.UIDs),
|
||||
GIDs: copyUint32Slice(l.GIDs),
|
||||
}
|
||||
}
|
||||
|
||||
|
12
vendor/github.com/containers/storage/pkg/config/config.go
generated
vendored
12
vendor/github.com/containers/storage/pkg/config/config.go
generated
vendored
@@ -134,6 +134,18 @@ type OptionsConfig struct {
|
||||
// should be used to set up default GID mappings.
|
||||
RemapGroup string `toml:"remap-group"`
|
||||
|
||||
// RootAutoUsernsUser is the name of one or more entries in /etc/subuid and
|
||||
// /etc/subgid which should be used to set up automatically a userns.
|
||||
RootAutoUsernsUser string `toml:"root-auto-userns-user"`
|
||||
|
||||
// AutoUsernsMinSize is the minimum size for a user namespace that is
|
||||
// created automatically.
|
||||
AutoUsernsMinSize uint32 `toml:"auto-userns-min-size"`
|
||||
|
||||
// AutoUsernsMaxSize is the maximum size for a user namespace that is
|
||||
// created automatically.
|
||||
AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size"`
|
||||
|
||||
// Aufs container options to be handed to aufs drivers
|
||||
Aufs struct{ AufsOptionsConfig } `toml:"aufs"`
|
||||
|
||||
|
22
vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
generated
vendored
Normal file
22
vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
// +build linux,cgo
|
||||
|
||||
package unshare
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
/*
|
||||
#cgo remoteclient CFLAGS: -Wall -Werror
|
||||
#include <stdlib.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
func getenv(name string) string {
|
||||
cName := C.CString(name)
|
||||
defer C.free(unsafe.Pointer(cName))
|
||||
|
||||
value := C.GoString(C.getenv(cName))
|
||||
|
||||
return value
|
||||
}
|
11
vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
generated
vendored
Normal file
11
vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// +build linux,!cgo
|
||||
|
||||
package unshare
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
func getenv(name string) string {
|
||||
return os.Getenv(name)
|
||||
}
|
291
vendor/github.com/containers/storage/pkg/unshare/unshare.c
generated
vendored
Normal file
291
vendor/github.com/containers/storage/pkg/unshare/unshare.c
generated
vendored
Normal file
@@ -0,0 +1,291 @@
|
||||
#ifndef UNSHARE_NO_CODE_AT_ALL
|
||||
|
||||
#define _GNU_SOURCE
|
||||
#include <sys/types.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/syscall.h>
|
||||
#include <sys/mman.h>
|
||||
#include <fcntl.h>
|
||||
#include <grp.h>
|
||||
#include <sched.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <termios.h>
|
||||
#include <errno.h>
|
||||
#include <unistd.h>
|
||||
|
||||
/* Open Source projects like conda-forge, want to package podman and are based
|
||||
off of centos:6, Conda-force has minimal libc requirements and is lacking
|
||||
the memfd.h file, so we use mmam.h
|
||||
*/
|
||||
#ifndef MFD_ALLOW_SEALING
|
||||
#define MFD_ALLOW_SEALING 2U
|
||||
#endif
|
||||
#ifndef MFD_CLOEXEC
|
||||
#define MFD_CLOEXEC 1U
|
||||
#endif
|
||||
|
||||
#ifndef F_LINUX_SPECIFIC_BASE
|
||||
#define F_LINUX_SPECIFIC_BASE 1024
|
||||
#endif
|
||||
#ifndef F_ADD_SEALS
|
||||
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
|
||||
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
|
||||
#endif
|
||||
#ifndef F_SEAL_SEAL
|
||||
#define F_SEAL_SEAL 0x0001LU
|
||||
#endif
|
||||
#ifndef F_SEAL_SHRINK
|
||||
#define F_SEAL_SHRINK 0x0002LU
|
||||
#endif
|
||||
#ifndef F_SEAL_GROW
|
||||
#define F_SEAL_GROW 0x0004LU
|
||||
#endif
|
||||
#ifndef F_SEAL_WRITE
|
||||
#define F_SEAL_WRITE 0x0008LU
|
||||
#endif
|
||||
|
||||
#define BUFSTEP 1024
|
||||
|
||||
static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces";
|
||||
static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone";
|
||||
|
||||
static int _containers_unshare_parse_envint(const char *envname) {
|
||||
char *p, *q;
|
||||
long l;
|
||||
|
||||
p = getenv(envname);
|
||||
if (p == NULL) {
|
||||
return -1;
|
||||
}
|
||||
q = NULL;
|
||||
l = strtol(p, &q, 10);
|
||||
if ((q == NULL) || (*q != '\0')) {
|
||||
fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p);
|
||||
_exit(1);
|
||||
}
|
||||
unsetenv(envname);
|
||||
return l;
|
||||
}
|
||||
|
||||
static void _check_proc_sys_file(const char *path)
|
||||
{
|
||||
FILE *fp;
|
||||
char buf[32];
|
||||
size_t n_read;
|
||||
long r;
|
||||
|
||||
fp = fopen(path, "r");
|
||||
if (fp == NULL) {
|
||||
if (errno != ENOENT)
|
||||
fprintf(stderr, "Error reading %s: %m\n", _max_user_namespaces);
|
||||
} else {
|
||||
memset(buf, 0, sizeof(buf));
|
||||
n_read = fread(buf, 1, sizeof(buf) - 1, fp);
|
||||
if (n_read > 0) {
|
||||
r = atoi(buf);
|
||||
if (r == 0) {
|
||||
fprintf(stderr, "User namespaces are not enabled in %s.\n", path);
|
||||
}
|
||||
} else {
|
||||
fprintf(stderr, "Error reading %s: no contents, should contain a number greater than 0.\n", path);
|
||||
}
|
||||
fclose(fp);
|
||||
}
|
||||
}
|
||||
|
||||
static char **parse_proc_stringlist(const char *list) {
|
||||
int fd, n, i, n_strings;
|
||||
char *buf, *new_buf, **ret;
|
||||
size_t size, new_size, used;
|
||||
|
||||
fd = open(list, O_RDONLY);
|
||||
if (fd == -1) {
|
||||
return NULL;
|
||||
}
|
||||
buf = NULL;
|
||||
size = 0;
|
||||
used = 0;
|
||||
for (;;) {
|
||||
new_size = used + BUFSTEP;
|
||||
new_buf = realloc(buf, new_size);
|
||||
if (new_buf == NULL) {
|
||||
free(buf);
|
||||
fprintf(stderr, "realloc(%ld): out of memory\n", (long)(size + BUFSTEP));
|
||||
return NULL;
|
||||
}
|
||||
buf = new_buf;
|
||||
size = new_size;
|
||||
memset(buf + used, '\0', size - used);
|
||||
n = read(fd, buf + used, size - used - 1);
|
||||
if (n < 0) {
|
||||
fprintf(stderr, "read(): %m\n");
|
||||
return NULL;
|
||||
}
|
||||
if (n == 0) {
|
||||
break;
|
||||
}
|
||||
used += n;
|
||||
}
|
||||
close(fd);
|
||||
n_strings = 0;
|
||||
for (n = 0; n < used; n++) {
|
||||
if ((n == 0) || (buf[n-1] == '\0')) {
|
||||
n_strings++;
|
||||
}
|
||||
}
|
||||
ret = calloc(n_strings + 1, sizeof(char *));
|
||||
if (ret == NULL) {
|
||||
fprintf(stderr, "calloc(): out of memory\n");
|
||||
return NULL;
|
||||
}
|
||||
i = 0;
|
||||
for (n = 0; n < used; n++) {
|
||||
if ((n == 0) || (buf[n-1] == '\0')) {
|
||||
ret[i++] = &buf[n];
|
||||
}
|
||||
}
|
||||
ret[i] = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int containers_reexec(void) {
|
||||
char **argv, *exename;
|
||||
int fd, mmfd, n_read, n_written;
|
||||
struct stat st;
|
||||
char buf[2048];
|
||||
|
||||
argv = parse_proc_stringlist("/proc/self/cmdline");
|
||||
if (argv == NULL) {
|
||||
return -1;
|
||||
}
|
||||
fd = open("/proc/self/exe", O_RDONLY | O_CLOEXEC);
|
||||
if (fd == -1) {
|
||||
fprintf(stderr, "open(\"/proc/self/exe\"): %m\n");
|
||||
return -1;
|
||||
}
|
||||
if (fstat(fd, &st) == -1) {
|
||||
fprintf(stderr, "fstat(\"/proc/self/exe\"): %m\n");
|
||||
return -1;
|
||||
}
|
||||
exename = basename(argv[0]);
|
||||
mmfd = syscall(SYS_memfd_create, exename, (long) MFD_ALLOW_SEALING | MFD_CLOEXEC);
|
||||
if (mmfd == -1) {
|
||||
fprintf(stderr, "memfd_create(): %m\n");
|
||||
return -1;
|
||||
}
|
||||
for (;;) {
|
||||
n_read = read(fd, buf, sizeof(buf));
|
||||
if (n_read < 0) {
|
||||
fprintf(stderr, "read(\"/proc/self/exe\"): %m\n");
|
||||
return -1;
|
||||
}
|
||||
if (n_read == 0) {
|
||||
break;
|
||||
}
|
||||
n_written = write(mmfd, buf, n_read);
|
||||
if (n_written < 0) {
|
||||
fprintf(stderr, "write(anonfd): %m\n");
|
||||
return -1;
|
||||
}
|
||||
if (n_written != n_read) {
|
||||
fprintf(stderr, "write(anonfd): short write (%d != %d)\n", n_written, n_read);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
close(fd);
|
||||
if (fcntl(mmfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) == -1) {
|
||||
close(mmfd);
|
||||
fprintf(stderr, "Error sealing memfd copy: %m\n");
|
||||
return -1;
|
||||
}
|
||||
if (fexecve(mmfd, argv, environ) == -1) {
|
||||
close(mmfd);
|
||||
fprintf(stderr, "Error during reexec(...): %m\n");
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void _containers_unshare(void)
|
||||
{
|
||||
int flags, pidfd, continuefd, n, pgrp, sid, ctty;
|
||||
char buf[2048];
|
||||
|
||||
flags = _containers_unshare_parse_envint("_Containers-unshare");
|
||||
if (flags == -1) {
|
||||
return;
|
||||
}
|
||||
if ((flags & CLONE_NEWUSER) != 0) {
|
||||
if (unshare(CLONE_NEWUSER) == -1) {
|
||||
fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n");
|
||||
_check_proc_sys_file (_max_user_namespaces);
|
||||
_check_proc_sys_file (_unprivileged_user_namespaces);
|
||||
_exit(1);
|
||||
}
|
||||
}
|
||||
pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe");
|
||||
if (pidfd != -1) {
|
||||
snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
|
||||
size_t size = write(pidfd, buf, strlen(buf));
|
||||
if (size != strlen(buf)) {
|
||||
fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd);
|
||||
_exit(1);
|
||||
}
|
||||
close(pidfd);
|
||||
}
|
||||
continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe");
|
||||
if (continuefd != -1) {
|
||||
n = read(continuefd, buf, sizeof(buf));
|
||||
if (n > 0) {
|
||||
fprintf(stderr, "Error: %.*s\n", n, buf);
|
||||
_exit(1);
|
||||
}
|
||||
close(continuefd);
|
||||
}
|
||||
sid = _containers_unshare_parse_envint("_Containers-setsid");
|
||||
if (sid == 1) {
|
||||
if (setsid() == -1) {
|
||||
fprintf(stderr, "Error during setsid: %m\n");
|
||||
_exit(1);
|
||||
}
|
||||
}
|
||||
pgrp = _containers_unshare_parse_envint("_Containers-setpgrp");
|
||||
if (pgrp == 1) {
|
||||
if (setpgrp() == -1) {
|
||||
fprintf(stderr, "Error during setpgrp: %m\n");
|
||||
_exit(1);
|
||||
}
|
||||
}
|
||||
ctty = _containers_unshare_parse_envint("_Containers-ctty");
|
||||
if (ctty != -1) {
|
||||
if (ioctl(ctty, TIOCSCTTY, 0) == -1) {
|
||||
fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty);
|
||||
_exit(1);
|
||||
}
|
||||
}
|
||||
if ((flags & CLONE_NEWUSER) != 0) {
|
||||
if (setresgid(0, 0, 0) != 0) {
|
||||
fprintf(stderr, "Error during setresgid(0): %m\n");
|
||||
_exit(1);
|
||||
}
|
||||
if (setresuid(0, 0, 0) != 0) {
|
||||
fprintf(stderr, "Error during setresuid(0): %m\n");
|
||||
_exit(1);
|
||||
}
|
||||
}
|
||||
if ((flags & ~CLONE_NEWUSER) != 0) {
|
||||
if (unshare(flags & ~CLONE_NEWUSER) == -1) {
|
||||
fprintf(stderr, "Error during unshare(...): %m\n");
|
||||
_exit(1);
|
||||
}
|
||||
}
|
||||
if (containers_reexec() != 0) {
|
||||
_exit(1);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
#endif // !UNSHARE_NO_CODE_AT_ALL
|
22
vendor/github.com/containers/storage/pkg/unshare/unshare.go
generated
vendored
Normal file
22
vendor/github.com/containers/storage/pkg/unshare/unshare.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
package unshare
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/user"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// HomeDir returns the home directory for the current user.
|
||||
func HomeDir() (string, error) {
|
||||
home := os.Getenv("HOME")
|
||||
if home == "" {
|
||||
usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID()))
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "unable to resolve HOME directory")
|
||||
}
|
||||
home = usr.HomeDir
|
||||
}
|
||||
return home, nil
|
||||
}
|
10
vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
generated
vendored
Normal file
10
vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
// +build linux,cgo,!gccgo
|
||||
|
||||
package unshare
|
||||
|
||||
// #cgo CFLAGS: -Wall
|
||||
// extern void _containers_unshare(void);
|
||||
// void __attribute__((constructor)) init(void) {
|
||||
// _containers_unshare();
|
||||
// }
|
||||
import "C"
|
25
vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
generated
vendored
Normal file
25
vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
// +build linux,cgo,gccgo
|
||||
|
||||
package unshare
|
||||
|
||||
// #cgo CFLAGS: -Wall -Wextra
|
||||
// extern void _containers_unshare(void);
|
||||
// void __attribute__((constructor)) init(void) {
|
||||
// _containers_unshare();
|
||||
// }
|
||||
import "C"
|
||||
|
||||
// This next bit is straight out of libcontainer.
|
||||
|
||||
// AlwaysFalse is here to stay false
|
||||
// (and be exported so the compiler doesn't optimize out its reference)
|
||||
var AlwaysFalse bool
|
||||
|
||||
func init() {
|
||||
if AlwaysFalse {
|
||||
// by referencing this C init() in a noop test, it will ensure the compiler
|
||||
// links in the C function.
|
||||
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134
|
||||
C.init()
|
||||
}
|
||||
}
|
605
vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
generated
vendored
Normal file
605
vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
generated
vendored
Normal file
@@ -0,0 +1,605 @@
|
||||
// +build linux
|
||||
|
||||
package unshare
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/reexec"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
)
|
||||
|
||||
// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and
|
||||
// handles setting ID maps and other related settings by triggering
|
||||
// initialization code in the child.
|
||||
type Cmd struct {
|
||||
*exec.Cmd
|
||||
UnshareFlags int
|
||||
UseNewuidmap bool
|
||||
UidMappings []specs.LinuxIDMapping // nolint: golint
|
||||
UseNewgidmap bool
|
||||
GidMappings []specs.LinuxIDMapping // nolint: golint
|
||||
GidMappingsEnableSetgroups bool
|
||||
Setsid bool
|
||||
Setpgrp bool
|
||||
Ctty *os.File
|
||||
OOMScoreAdj *int
|
||||
Hook func(pid int) error
|
||||
}
|
||||
|
||||
// Command creates a new Cmd which can be customized.
|
||||
func Command(args ...string) *Cmd {
|
||||
cmd := reexec.Command(args...)
|
||||
return &Cmd{
|
||||
Cmd: cmd,
|
||||
}
|
||||
}
|
||||
|
||||
func getRootlessUID() int {
|
||||
uidEnv := getenv("_CONTAINERS_ROOTLESS_UID")
|
||||
if uidEnv != "" {
|
||||
u, _ := strconv.Atoi(uidEnv)
|
||||
return u
|
||||
}
|
||||
return os.Geteuid()
|
||||
}
|
||||
|
||||
func getRootlessGID() int {
|
||||
gidEnv := getenv("_CONTAINERS_ROOTLESS_GID")
|
||||
if gidEnv != "" {
|
||||
u, _ := strconv.Atoi(gidEnv)
|
||||
return u
|
||||
}
|
||||
|
||||
/* If the _CONTAINERS_ROOTLESS_UID is set, assume the gid==uid. */
|
||||
uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
|
||||
if uidEnv != "" {
|
||||
u, _ := strconv.Atoi(uidEnv)
|
||||
return u
|
||||
}
|
||||
return os.Getegid()
|
||||
}
|
||||
|
||||
func (c *Cmd) Start() error {
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
// Set an environment variable to tell the child to synchronize its startup.
|
||||
if c.Env == nil {
|
||||
c.Env = os.Environ()
|
||||
}
|
||||
c.Env = append(c.Env, fmt.Sprintf("_Containers-unshare=%d", c.UnshareFlags))
|
||||
|
||||
// Please the libpod "rootless" package to find the expected env variables.
|
||||
if IsRootless() {
|
||||
c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done")
|
||||
c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", getRootlessUID()))
|
||||
c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_GID=%d", getRootlessGID()))
|
||||
}
|
||||
|
||||
// Create the pipe for reading the child's PID.
|
||||
pidRead, pidWrite, err := os.Pipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating pid pipe")
|
||||
}
|
||||
c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
|
||||
c.ExtraFiles = append(c.ExtraFiles, pidWrite)
|
||||
|
||||
// Create the pipe for letting the child know to proceed.
|
||||
continueRead, continueWrite, err := os.Pipe()
|
||||
if err != nil {
|
||||
pidRead.Close()
|
||||
pidWrite.Close()
|
||||
return errors.Wrapf(err, "error creating pid pipe")
|
||||
}
|
||||
c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
|
||||
c.ExtraFiles = append(c.ExtraFiles, continueRead)
|
||||
|
||||
// Pass along other instructions.
|
||||
if c.Setsid {
|
||||
c.Env = append(c.Env, "_Containers-setsid=1")
|
||||
}
|
||||
if c.Setpgrp {
|
||||
c.Env = append(c.Env, "_Containers-setpgrp=1")
|
||||
}
|
||||
if c.Ctty != nil {
|
||||
c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3))
|
||||
c.ExtraFiles = append(c.ExtraFiles, c.Ctty)
|
||||
}
|
||||
|
||||
// Make sure we clean up our pipes.
|
||||
defer func() {
|
||||
if pidRead != nil {
|
||||
pidRead.Close()
|
||||
}
|
||||
if pidWrite != nil {
|
||||
pidWrite.Close()
|
||||
}
|
||||
if continueRead != nil {
|
||||
continueRead.Close()
|
||||
}
|
||||
if continueWrite != nil {
|
||||
continueWrite.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Start the new process.
|
||||
err = c.Cmd.Start()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Close the ends of the pipes that the parent doesn't need.
|
||||
continueRead.Close()
|
||||
continueRead = nil
|
||||
pidWrite.Close()
|
||||
pidWrite = nil
|
||||
|
||||
// Read the child's PID from the pipe.
|
||||
pidString := ""
|
||||
b := new(bytes.Buffer)
|
||||
if _, err := io.Copy(b, pidRead); err != nil {
|
||||
return errors.Wrapf(err, "error reading child PID")
|
||||
}
|
||||
pidString = b.String()
|
||||
pid, err := strconv.Atoi(pidString)
|
||||
if err != nil {
|
||||
fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
|
||||
return errors.Wrapf(err, "error parsing PID %q", pidString)
|
||||
}
|
||||
pidString = fmt.Sprintf("%d", pid)
|
||||
|
||||
// If we created a new user namespace, set any specified mappings.
|
||||
if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 {
|
||||
// Always set "setgroups".
|
||||
setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
|
||||
if err != nil {
|
||||
fmt.Fprintf(continueWrite, "error opening setgroups: %v", err)
|
||||
return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
|
||||
}
|
||||
defer setgroups.Close()
|
||||
if c.GidMappingsEnableSetgroups {
|
||||
if _, err := fmt.Fprintf(setgroups, "allow"); err != nil {
|
||||
fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err)
|
||||
return errors.Wrapf(err, "error opening \"allow\" to /proc/%s/setgroups", pidString)
|
||||
}
|
||||
} else {
|
||||
if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
|
||||
fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err)
|
||||
return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString)
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 {
|
||||
uidmap, gidmap, err := GetHostIDMappings("")
|
||||
if err != nil {
|
||||
fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err)
|
||||
return errors.Wrapf(err, "error reading ID mappings in parent")
|
||||
}
|
||||
if len(c.UidMappings) == 0 {
|
||||
c.UidMappings = uidmap
|
||||
for i := range c.UidMappings {
|
||||
c.UidMappings[i].HostID = c.UidMappings[i].ContainerID
|
||||
}
|
||||
}
|
||||
if len(c.GidMappings) == 0 {
|
||||
c.GidMappings = gidmap
|
||||
for i := range c.GidMappings {
|
||||
c.GidMappings[i].HostID = c.GidMappings[i].ContainerID
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.GidMappings) > 0 {
|
||||
// Build the GID map, since writing to the proc file has to be done all at once.
|
||||
g := new(bytes.Buffer)
|
||||
for _, m := range c.GidMappings {
|
||||
fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
|
||||
}
|
||||
gidmapSet := false
|
||||
// Set the GID map.
|
||||
if c.UseNewgidmap {
|
||||
cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
|
||||
g.Reset()
|
||||
cmd.Stdout = g
|
||||
cmd.Stderr = g
|
||||
err := cmd.Run()
|
||||
if err == nil {
|
||||
gidmapSet = true
|
||||
} else {
|
||||
logrus.Warnf("error running newgidmap: %v: %s", err, g.String())
|
||||
logrus.Warnf("falling back to single mapping")
|
||||
g.Reset()
|
||||
g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid())))
|
||||
}
|
||||
}
|
||||
if !gidmapSet {
|
||||
if c.UseNewgidmap {
|
||||
setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
|
||||
if err != nil {
|
||||
fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err)
|
||||
return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
|
||||
}
|
||||
defer setgroups.Close()
|
||||
if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
|
||||
fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err)
|
||||
return errors.Wrapf(err, "error writing 'deny' to /proc/%s/setgroups", pidString)
|
||||
}
|
||||
}
|
||||
gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
|
||||
if err != nil {
|
||||
fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
|
||||
return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString)
|
||||
}
|
||||
defer gidmap.Close()
|
||||
if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil {
|
||||
fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
|
||||
return errors.Wrapf(err, "error writing %q to /proc/%s/gid_map", g.String(), pidString)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.UidMappings) > 0 {
|
||||
// Build the UID map, since writing to the proc file has to be done all at once.
|
||||
u := new(bytes.Buffer)
|
||||
for _, m := range c.UidMappings {
|
||||
fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
|
||||
}
|
||||
uidmapSet := false
|
||||
// Set the GID map.
|
||||
if c.UseNewuidmap {
|
||||
cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
|
||||
u.Reset()
|
||||
cmd.Stdout = u
|
||||
cmd.Stderr = u
|
||||
err := cmd.Run()
|
||||
if err == nil {
|
||||
uidmapSet = true
|
||||
} else {
|
||||
logrus.Warnf("error running newuidmap: %v: %s", err, u.String())
|
||||
logrus.Warnf("falling back to single mapping")
|
||||
u.Reset()
|
||||
u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid())))
|
||||
}
|
||||
}
|
||||
if !uidmapSet {
|
||||
uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
|
||||
if err != nil {
|
||||
fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err)
|
||||
return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString)
|
||||
}
|
||||
defer uidmap.Close()
|
||||
if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil {
|
||||
fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err)
|
||||
return errors.Wrapf(err, "error writing %q to /proc/%s/uid_map", u.String(), pidString)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if c.OOMScoreAdj != nil {
|
||||
oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0)
|
||||
if err != nil {
|
||||
fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err)
|
||||
return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString)
|
||||
}
|
||||
defer oomScoreAdj.Close()
|
||||
if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil {
|
||||
fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err)
|
||||
return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", c.OOMScoreAdj, pidString)
|
||||
}
|
||||
}
|
||||
// Run any additional setup that we want to do before the child starts running proper.
|
||||
if c.Hook != nil {
|
||||
if err = c.Hook(pid); err != nil {
|
||||
fmt.Fprintf(continueWrite, "hook error: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cmd) Run() error {
|
||||
if err := c.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
return c.Wait()
|
||||
}
|
||||
|
||||
func (c *Cmd) CombinedOutput() ([]byte, error) {
|
||||
return nil, errors.New("unshare: CombinedOutput() not implemented")
|
||||
}
|
||||
|
||||
func (c *Cmd) Output() ([]byte, error) {
|
||||
return nil, errors.New("unshare: Output() not implemented")
|
||||
}
|
||||
|
||||
var (
|
||||
isRootlessOnce sync.Once
|
||||
isRootless bool
|
||||
)
|
||||
|
||||
const (
|
||||
// UsernsEnvName is the environment variable, if set indicates in rootless mode
|
||||
UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
|
||||
)
|
||||
|
||||
// IsRootless tells us if we are running in rootless mode
|
||||
func IsRootless() bool {
|
||||
isRootlessOnce.Do(func() {
|
||||
isRootless = getRootlessUID() != 0 || getenv(UsernsEnvName) != ""
|
||||
})
|
||||
return isRootless
|
||||
}
|
||||
|
||||
// GetRootlessUID returns the UID of the user in the parent userNS
|
||||
func GetRootlessUID() int {
|
||||
uidEnv := getenv("_CONTAINERS_ROOTLESS_UID")
|
||||
if uidEnv != "" {
|
||||
u, _ := strconv.Atoi(uidEnv)
|
||||
return u
|
||||
}
|
||||
return os.Getuid()
|
||||
}
|
||||
|
||||
// RootlessEnv returns the environment settings for the rootless containers
|
||||
func RootlessEnv() []string {
|
||||
return append(os.Environ(), UsernsEnvName+"=done")
|
||||
}
|
||||
|
||||
type Runnable interface {
|
||||
Run() error
|
||||
}
|
||||
|
||||
func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname
|
||||
if err != nil {
|
||||
if format != "" {
|
||||
logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
|
||||
} else {
|
||||
logrus.Errorf("%v", err)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// MaybeReexecUsingUserNamespace re-exec the process in a new namespace
|
||||
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
|
||||
// If we've already been through this once, no need to try again.
|
||||
if os.Geteuid() == 0 && IsRootless() {
|
||||
return
|
||||
}
|
||||
|
||||
var uidNum, gidNum uint64
|
||||
// Figure out who we are.
|
||||
me, err := user.Current()
|
||||
if !os.IsNotExist(err) {
|
||||
bailOnError(err, "error determining current user")
|
||||
uidNum, err = strconv.ParseUint(me.Uid, 10, 32)
|
||||
bailOnError(err, "error parsing current UID %s", me.Uid)
|
||||
gidNum, err = strconv.ParseUint(me.Gid, 10, 32)
|
||||
bailOnError(err, "error parsing current GID %s", me.Gid)
|
||||
}
|
||||
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
// ID mappings to use to reexec ourselves.
|
||||
var uidmap, gidmap []specs.LinuxIDMapping
|
||||
if uidNum != 0 || evenForRoot {
|
||||
// Read the set of ID mappings that we're allowed to use. Each
|
||||
// range in /etc/subuid and /etc/subgid file is a starting host
|
||||
// ID and a range size.
|
||||
uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username)
|
||||
if err != nil {
|
||||
logrus.Warnf("error reading allowed ID mappings: %v", err)
|
||||
}
|
||||
if len(uidmap) == 0 {
|
||||
logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
|
||||
}
|
||||
if len(gidmap) == 0 {
|
||||
logrus.Warnf("Found no GID ranges set aside for user %q in /etc/subgid.", me.Username)
|
||||
}
|
||||
// Map our UID and GID, then the subuid and subgid ranges,
|
||||
// consecutively, starting at 0, to get the mappings to use for
|
||||
// a copy of ourselves.
|
||||
uidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...)
|
||||
gidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...)
|
||||
var rangeStart uint32
|
||||
for i := range uidmap {
|
||||
uidmap[i].ContainerID = rangeStart
|
||||
rangeStart += uidmap[i].Size
|
||||
}
|
||||
rangeStart = 0
|
||||
for i := range gidmap {
|
||||
gidmap[i].ContainerID = rangeStart
|
||||
rangeStart += gidmap[i].Size
|
||||
}
|
||||
} else {
|
||||
// If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
|
||||
// to use unshare(), so don't bother creating a new user namespace at this point.
|
||||
capabilities, err := capability.NewPid(0)
|
||||
bailOnError(err, "error reading the current capabilities sets")
|
||||
if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
|
||||
return
|
||||
}
|
||||
// Read the set of ID mappings that we're currently using.
|
||||
uidmap, gidmap, err = GetHostIDMappings("")
|
||||
bailOnError(err, "error reading current ID mappings")
|
||||
// Just reuse them.
|
||||
for i := range uidmap {
|
||||
uidmap[i].HostID = uidmap[i].ContainerID
|
||||
}
|
||||
for i := range gidmap {
|
||||
gidmap[i].HostID = gidmap[i].ContainerID
|
||||
}
|
||||
}
|
||||
|
||||
// Unlike most uses of reexec or unshare, we're using a name that
|
||||
// _won't_ be recognized as a registered reexec handler, since we
|
||||
// _want_ to fall through reexec.Init() to the normal main().
|
||||
cmd := Command(append([]string{fmt.Sprintf("%s-in-a-user-namespace", os.Args[0])}, os.Args[1:]...)...)
|
||||
|
||||
// If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again.
|
||||
err = os.Setenv(UsernsEnvName, "1")
|
||||
bailOnError(err, "error setting %s=1 in environment", UsernsEnvName)
|
||||
|
||||
// Set the default isolation type to use the "rootless" method.
|
||||
if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
|
||||
if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
|
||||
if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
|
||||
logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reuse our stdio.
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
// Set up a new user namespace with the ID mapping.
|
||||
cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS
|
||||
cmd.UseNewuidmap = uidNum != 0
|
||||
cmd.UidMappings = uidmap
|
||||
cmd.UseNewgidmap = uidNum != 0
|
||||
cmd.GidMappings = gidmap
|
||||
cmd.GidMappingsEnableSetgroups = true
|
||||
|
||||
// Finish up.
|
||||
logrus.Debugf("running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
|
||||
ExecRunnable(cmd, nil)
|
||||
}
|
||||
|
||||
// ExecRunnable runs the specified unshare command, captures its exit status,
|
||||
// and exits with the same status.
|
||||
func ExecRunnable(cmd Runnable, cleanup func()) {
|
||||
exit := func(status int) {
|
||||
if cleanup != nil {
|
||||
cleanup()
|
||||
}
|
||||
os.Exit(status)
|
||||
}
|
||||
if err := cmd.Run(); err != nil {
|
||||
if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
|
||||
if exitError.ProcessState.Exited() {
|
||||
if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
|
||||
if waitStatus.Exited() {
|
||||
logrus.Errorf("%v", exitError)
|
||||
exit(waitStatus.ExitStatus())
|
||||
}
|
||||
if waitStatus.Signaled() {
|
||||
logrus.Errorf("%v", exitError)
|
||||
exit(int(waitStatus.Signal()) + 128)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
logrus.Errorf("%v", err)
|
||||
logrus.Errorf("(unable to determine exit status)")
|
||||
exit(1)
|
||||
}
|
||||
exit(0)
|
||||
}
|
||||
|
||||
// getHostIDMappings reads mappings from the named node under /proc.
|
||||
func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
|
||||
var mappings []specs.LinuxIDMapping
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading ID mappings from %q", path)
|
||||
}
|
||||
defer f.Close()
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) != 3 {
|
||||
return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
|
||||
}
|
||||
cid, err := strconv.ParseUint(fields[0], 10, 32)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path)
|
||||
}
|
||||
hid, err := strconv.ParseUint(fields[1], 10, 32)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path)
|
||||
}
|
||||
size, err := strconv.ParseUint(fields[2], 10, 32)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path)
|
||||
}
|
||||
mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
|
||||
}
|
||||
return mappings, nil
|
||||
}
|
||||
|
||||
// GetHostIDMappings reads mappings for the specified process (or the current
|
||||
// process if pid is "self" or an empty string) from the kernel.
|
||||
func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
|
||||
if pid == "" {
|
||||
pid = "self"
|
||||
}
|
||||
uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return uidmap, gidmap, nil
|
||||
}
|
||||
|
||||
// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid.
|
||||
func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
|
||||
mappings, err := idtools.NewIDMappings(user, group)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
|
||||
}
|
||||
var uidmap, gidmap []specs.LinuxIDMapping
|
||||
for _, m := range mappings.UIDs() {
|
||||
uidmap = append(uidmap, specs.LinuxIDMapping{
|
||||
ContainerID: uint32(m.ContainerID),
|
||||
HostID: uint32(m.HostID),
|
||||
Size: uint32(m.Size),
|
||||
})
|
||||
}
|
||||
for _, m := range mappings.GIDs() {
|
||||
gidmap = append(gidmap, specs.LinuxIDMapping{
|
||||
ContainerID: uint32(m.ContainerID),
|
||||
HostID: uint32(m.HostID),
|
||||
Size: uint32(m.Size),
|
||||
})
|
||||
}
|
||||
return uidmap, gidmap, nil
|
||||
}
|
||||
|
||||
// ParseIDMappings parses mapping triples.
|
||||
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
|
||||
uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return uid, gid, nil
|
||||
}
|
45
vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
generated
vendored
Normal file
45
vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
// +build !linux
|
||||
|
||||
package unshare
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
const (
|
||||
// UsernsEnvName is the environment variable, if set indicates in rootless mode
|
||||
UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
|
||||
)
|
||||
|
||||
// IsRootless tells us if we are running in rootless mode
|
||||
func IsRootless() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// GetRootlessUID returns the UID of the user in the parent userNS
|
||||
func GetRootlessUID() int {
|
||||
return os.Getuid()
|
||||
}
|
||||
|
||||
// RootlessEnv returns the environment settings for the rootless containers
|
||||
func RootlessEnv() []string {
|
||||
return append(os.Environ(), UsernsEnvName+"=")
|
||||
}
|
||||
|
||||
// MaybeReexecUsingUserNamespace re-exec the process in a new namespace
|
||||
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
|
||||
}
|
||||
|
||||
// GetHostIDMappings reads mappings for the specified process (or the current
|
||||
// process if pid is "self" or an empty string) from the kernel.
|
||||
func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
// ParseIDMappings parses mapping triples.
|
||||
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
|
||||
return nil, nil, nil
|
||||
}
|
10
vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
generated
vendored
Normal file
10
vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
// +build !linux,cgo
|
||||
|
||||
package unshare
|
||||
|
||||
// Go refuses to compile a subpackage with CGO_ENABLED=1 if there is a *.c file but no 'import "C"'.
|
||||
// OTOH if we did have an 'import "C"', the Linux-only code would fail to compile.
|
||||
// So, satisfy the Go compiler by using import "C" but #ifdef-ing out all of the code.
|
||||
|
||||
// #cgo CPPFLAGS: -DUNSHARE_NO_CODE_AT_ALL
|
||||
import "C"
|
18
vendor/github.com/containers/storage/storage.conf
generated
vendored
18
vendor/github.com/containers/storage/storage.conf
generated
vendored
@@ -43,8 +43,22 @@ additionalimagestores = [
|
||||
# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
|
||||
# until all of the entries have been used for maps.
|
||||
#
|
||||
# remap-user = "storage"
|
||||
# remap-group = "storage"
|
||||
# remap-user = "containers"
|
||||
# remap-group = "containers"
|
||||
|
||||
# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
|
||||
# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partioned
|
||||
# to containers configured to create automatically a user namespace. Containers
|
||||
# configured to automatically create a user namespace can still overlap with containers
|
||||
# having an explicit mapping set.
|
||||
# This setting is ignored when running as rootless.
|
||||
# root-auto-userns-user = "storage"
|
||||
#
|
||||
# Auto-userns-min-size is the minimum size for a user namespace created automatically.
|
||||
# auto-userns-min-size=1024
|
||||
#
|
||||
# Auto-userns-max-size is the minimum size for a user namespace created automatically.
|
||||
# auto-userns-max-size=65536
|
||||
|
||||
[storage.options.overlay]
|
||||
# ignore_chown_errors can be set to allow a non privileged user running with
|
||||
|
131
vendor/github.com/containers/storage/store.go
generated
vendored
131
vendor/github.com/containers/storage/store.go
generated
vendored
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/containers/storage/pkg/parsers"
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
"github.com/containers/storage/pkg/stringutils"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
@@ -152,6 +153,13 @@ type StoreOptions struct {
	// for use inside of a user namespace where UID mapping is being used.
	UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
	GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
	// RootAutoNsUser is the user used to pick a subrange when automatically setting
	// a user namespace for the root user.
	RootAutoNsUser string `json:"root_auto_ns_user,omitempty"`
	// AutoNsMinSize is the minimum size for an automatic user namespace.
	AutoNsMinSize uint32 `json:"auto_userns_min_size,omitempty"`
	// AutoNsMaxSize is the maximum size for an automatic user namespace.
	AutoNsMaxSize uint32 `json:"auto_userns_max_size,omitempty"`
}

// Store wraps up the various types of file-based stores that we use into a
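The new StoreOptions fields above mirror the root-auto-userns-user, auto-userns-min-size and auto-userns-max-size keys added to storage.conf earlier in this diff. A minimal sketch of setting them programmatically; the paths and the explicit sizes are illustrative, not required defaults:

package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	opts := storage.StoreOptions{
		RunRoot:         "/run/containers/storage",     // illustrative path
		GraphRoot:       "/var/lib/containers/storage", // illustrative path
		GraphDriverName: "overlay",
		RootAutoNsUser:  "containers", // user whose /etc/subuid and /etc/subgid ranges are used
		AutoNsMinSize:   1024,         // same values as the AutoUserNsMinSize/AutoUserNsMaxSize defaults
		AutoNsMaxSize:   65536,
	}
	store, err := storage.GetStore(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println("store ready:", store != nil)
}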
@@ -469,6 +477,27 @@ type Store interface {
	GetDigestLock(digest.Digest) (Locker, error)
}

// AutoUserNsOptions defines how to automatically create a user namespace.
type AutoUserNsOptions struct {
	// Size defines the size for the user namespace. If it is set to a
	// value bigger than 0, the user namespace will have exactly this size.
	// If it is not set, some heuristics will be used to find its size.
	Size uint32
	// InitialSize defines the minimum size for the user namespace.
	// The created user namespace will have at least this size.
	InitialSize uint32
	// PasswdFile to use if the container uses a volume.
	PasswdFile string
	// GroupFile to use if the container uses a volume.
	GroupFile string
	// AdditionalUIDMappings specifies additional UID mappings to include in
	// the generated user namespace.
	AdditionalUIDMappings []idtools.IDMap
	// AdditionalGIDMappings specifies additional GID mappings to include in
	// the generated user namespace.
	AdditionalGIDMappings []idtools.IDMap
}

// IDMappingOptions are used for specifying how ID mapping should be set up for
// a layer or container.
type IDMappingOptions struct {
@@ -485,6 +514,8 @@ type IDMappingOptions struct {
	HostGIDMapping bool
	UIDMap         []idtools.IDMap
	GIDMap         []idtools.IDMap
	AutoUserNs     bool
	AutoUserNsOpts AutoUserNsOptions
}

// LayerOptions is used for passing options to a Store's CreateLayer() and PutLayer() methods.
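With the two new fields above, a caller can ask the store to allocate a user namespace automatically instead of passing explicit UID/GID maps. A hedged sketch of such a request, assuming ContainerOptions embeds IDMappingOptions as it does elsewhere in this package; the container name and InitialSize are illustrative:

// Illustrative only: request an automatically allocated user namespace for a
// new container. "store" comes from storage.GetStore and "imageID" is any
// image already present in it.
func createWithAutoUserNs(store storage.Store, imageID string) (*storage.Container, error) {
	copts := &storage.ContainerOptions{
		IDMappingOptions: storage.IDMappingOptions{
			AutoUserNs: true,
			AutoUserNsOpts: storage.AutoUserNsOptions{
				InitialSize: 1024, // lower bound for the allocated range
			},
		},
	}
	return store.CreateContainer("", []string{"example"}, imageID, "", "", copts)
}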
@@ -525,11 +556,17 @@ type store struct {
	lastLoaded      time.Time
	runRoot         string
	graphLock       Locker
	usernsLock      Locker
	graphRoot       string
	graphDriverName string
	graphOptions    []string
	uidMap          []idtools.IDMap
	gidMap          []idtools.IDMap
	autoUsernsUser  string
	autoUIDMap      []idtools.IDMap // Set by getAvailableMappings()
	autoGIDMap      []idtools.IDMap // Set by getAvailableMappings()
	autoNsMinSize   uint32
	autoNsMaxSize   uint32
	graphDriver     drivers.Driver
	layerStore      LayerStore
	roLayerStores   []ROLayerStore
@@ -608,6 +645,20 @@ func GetStore(options StoreOptions) (Store, error) {
	if err != nil {
		return nil, err
	}

	usernsLock, err := GetLockfile(filepath.Join(options.GraphRoot, "userns.lock"))
	if err != nil {
		return nil, err
	}

	autoNsMinSize := options.AutoNsMinSize
	autoNsMaxSize := options.AutoNsMaxSize
	if autoNsMinSize == 0 {
		autoNsMinSize = AutoUserNsMinSize
	}
	if autoNsMaxSize == 0 {
		autoNsMaxSize = AutoUserNsMaxSize
	}
	s := &store{
		runRoot:   options.RunRoot,
		graphLock: graphLock,
@@ -616,6 +667,12 @@ func GetStore(options StoreOptions) (Store, error) {
		graphOptions:   options.GraphDriverOptions,
		uidMap:         copyIDMap(options.UIDMap),
		gidMap:         copyIDMap(options.GIDMap),
		autoUsernsUser: options.RootAutoNsUser,
		autoNsMinSize:  autoNsMinSize,
		autoNsMaxSize:  autoNsMaxSize,
		autoUIDMap:     nil,
		autoGIDMap:     nil,
		usernsLock:     usernsLock,
	}
	if err := s.load(); err != nil {
		return nil, err
@@ -626,6 +683,18 @@ func GetStore(options StoreOptions) (Store, error) {
	return s, nil
}

func copyUint32Slice(slice []uint32) []uint32 {
	m := []uint32{}
	if slice != nil {
		m = make([]uint32, len(slice))
		copy(m, slice)
	}
	if len(m) > 0 {
		return m[:]
	}
	return nil
}

func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap {
	m := []idtools.IDMap{}
	if idmap != nil {
@@ -1151,21 +1220,32 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat

	var imageTopLayer *Layer
	imageID := ""
	uidMap := options.UIDMap
	gidMap := options.GIDMap

	idMappingsOptions := options.IDMappingOptions
	if options.AutoUserNs || options.UIDMap != nil || options.GIDMap != nil {
		// Prevent multiple instances from retrieving the same range when AutoUserNs
		// is used.
		// It doesn't prevent containers that specify an explicit mapping from
		// overlapping with AutoUserNs.
		s.usernsLock.Lock()
		defer s.usernsLock.Unlock()
	}

	var imageHomeStore ROImageStore
	var istore ImageStore
	var istores []ROImageStore
	var lstores []ROLayerStore
	var cimage *Image
	if image != "" {
		var imageHomeStore ROImageStore
		lstores, err := s.ROLayerStores()
		var err error
		lstores, err = s.ROLayerStores()
		if err != nil {
			return nil, err
		}
		istore, err := s.ImageStore()
		istore, err = s.ImageStore()
		if err != nil {
			return nil, err
		}
		istores, err := s.ROImageStores()
		istores, err = s.ROImageStores()
		if err != nil {
			return nil, err
		}
@@ -1176,7 +1256,6 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
			return nil, err
		}
	}
	var cimage *Image
	for _, s := range append([]ROImageStore{istore}, istores...) {
		store := s
		if store == istore {
@@ -1200,7 +1279,21 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
			return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
		}
		imageID = cimage.ID
	}

	if options.AutoUserNs {
		var err error
		options.UIDMap, options.GIDMap, err = s.getAutoUserNS(id, &options.AutoUserNsOpts, cimage)
		if err != nil {
			return nil, err
		}
	}

	uidMap := options.UIDMap
	gidMap := options.GIDMap

	idMappingsOptions := options.IDMappingOptions
	if image != "" {
		if cimage.TopLayer != "" {
			createMappedLayer := imageHomeStore == istore
			ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, createMappedLayer, rlstore, lstores, idMappingsOptions)
@@ -2356,14 +2449,15 @@ func (s *store) DeleteContainer(id string) error {
		close(errChan)
	}()

	var errors []error
	for {
		select {
		case err, ok := <-errChan:
			if !ok {
				return nil
				return multierror.Append(nil, errors...).ErrorOrNil()
			}
			if err != nil {
				return err
				errors = append(errors, err)
			}
		}
	}
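The hunk above switches DeleteContainer from bailing out on the first error to collecting every error with hashicorp/go-multierror. A small self-contained sketch of the same aggregation pattern; the error values are invented for illustration:

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	// Pretend these came out of the error channel fed by the cleanup goroutines.
	errs := []error{
		errors.New("removing layer: device busy"),
		errors.New("removing container directory: permission denied"),
	}

	// Append(nil, ...) builds a *multierror.Error; ErrorOrNil() collapses an
	// empty list back to a nil error, so callers still see nil on success.
	combined := multierror.Append(nil, errs...).ErrorOrNil()
	fmt.Println(combined)
}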
@@ -3305,6 +3399,16 @@ func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
// defaultConfigFile path to the system wide storage.conf file
const defaultConfigFile = "/etc/containers/storage.conf"

// AutoUserNsMinSize is the minimum size for automatically created user namespaces
const AutoUserNsMinSize = 1024

// AutoUserNsMaxSize is the maximum size for automatically created user namespaces
const AutoUserNsMaxSize = 65536

// RootAutoUserNsUser is the default user used for root containers when automatically
// creating a user namespace.
const RootAutoUserNsUser = "containers"

// DefaultConfigFile returns the path to the storage config file used
func DefaultConfigFile(rootless bool) (string, error) {
	if rootless {
@@ -3406,6 +3510,13 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
	} else {
		storeOptions.GIDMap = append(storeOptions.GIDMap, gidmap...)
	}
	storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser
	if config.Storage.Options.AutoUsernsMinSize > 0 {
		storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize
	}
	if config.Storage.Options.AutoUsernsMaxSize > 0 {
		storeOptions.AutoNsMaxSize = config.Storage.Options.AutoUsernsMaxSize
	}

	storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...)
457
vendor/github.com/containers/storage/userns.go
generated
vendored
Normal file
@@ -0,0 +1,457 @@
package storage

import (
	"os"
	"os/user"
	"path/filepath"
	"strconv"

	drivers "github.com/containers/storage/drivers"
	"github.com/containers/storage/pkg/idtools"
	"github.com/containers/storage/pkg/unshare"
	libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// getAdditionalSubIDs looks up the additional IDs configured for
// the specified user.
// The argument USERNAME is ignored for rootless users, as it is not
// possible to use an arbitrary entry in /etc/sub*id.
// By contrast, if the username is not specified for root users, a
// default name is used.
func getAdditionalSubIDs(username string) ([]idtools.IDMap, []idtools.IDMap, error) {
	var uids, gids []idtools.IDMap

	if unshare.IsRootless() {
		username = os.Getenv("USER")
		if username == "" {
			var id string
			if os.Geteuid() == 0 {
				id = strconv.Itoa(unshare.GetRootlessUID())
			} else {
				id = strconv.Itoa(os.Geteuid())
			}
			userID, err := user.LookupId(id)
			if err == nil {
				username = userID.Username
			}
		}
	} else if username == "" {
		username = RootAutoUserNsUser
	}
	mappings, err := idtools.NewIDMappings(username, username)
	if err != nil {
		logrus.Errorf("cannot find mappings for user %q: %v", username, err)
	} else {
		uids = mappings.UIDs()
		gids = mappings.GIDs()
	}
	return uids, gids, nil
}

// getAvailableMappings returns the list of ranges that are usable by the current user.
// When running as root, it looks up the additional IDs assigned to the specified user.
// When running as rootless, the mappings assigned to the unprivileged user are converted
// to the IDs inside of the initial rootless user namespace.
func (s *store) getAvailableMappings() ([]idtools.IDMap, []idtools.IDMap, error) {
	if s.autoUIDMap == nil {
		uids, gids, err := getAdditionalSubIDs(s.autoUsernsUser)
		if err != nil {
			return nil, nil, err
		}
		// Store the result so we don't need to look it up again next time
		s.autoUIDMap, s.autoGIDMap = uids, gids
	}

	uids := s.autoUIDMap
	gids := s.autoGIDMap

	if !unshare.IsRootless() {
		// No mapping to inner namespace needed
		return copyIDMap(uids), copyIDMap(gids), nil
	}

	// We are already inside of the rootless user namespace.
	// We need to remap the configured mappings to what is available
	// inside of the rootless userns.
	totaluid := 0
	totalgid := 0
	for _, u := range uids {
		totaluid += u.Size
	}
	for _, g := range gids {
		totalgid += g.Size
	}

	u := []idtools.IDMap{{ContainerID: 0, HostID: 1, Size: totaluid}}
	g := []idtools.IDMap{{ContainerID: 0, HostID: 1, Size: totalgid}}
	return u, g, nil
}

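To make the rootless branch above concrete, a hedged worked example with invented /etc/subuid numbers: if the unprivileged user owns the host range 100000-165535, the initial rootless namespace already exposes those IDs as 1-65536, so only the total size matters inside it.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Example /etc/subuid entry for the unprivileged user: 100000:65536.
	hostRanges := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	total := 0
	for _, r := range hostRanges {
		total += r.Size
	}

	// Inside the already-running rootless user namespace those host IDs are
	// visible as 1..total, so the usable range collapses to a single map.
	inner := []idtools.IDMap{{ContainerID: 0, HostID: 1, Size: total}}
	fmt.Println(inner) // one range of 65536 IDs starting at in-namespace ID 1
}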
// parseMountedFiles returns the maximum UID and GID found in the /etc/passwd and
// /etc/group files.
func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
	if passwdFile == "" {
		passwdFile = filepath.Join(containerMount, "etc/passwd")
	}
	if groupFile == "" {
		groupFile = filepath.Join(containerMount, "etc/group")
	}

	size := 0

	users, err := libcontainerUser.ParsePasswdFile(passwdFile)
	if err == nil {
		for _, u := range users {
			// Skip the "nobody" user, otherwise we end up with 65536
			// IDs with most images
			if u.Name == "nobody" {
				continue
			}
			if u.Uid > size {
				size = u.Uid
			}
			if u.Gid > size {
				size = u.Gid
			}
		}
	}

	groups, err := libcontainerUser.ParseGroupFile(groupFile)
	if err == nil {
		for _, g := range groups {
			if g.Name == "nobody" {
				continue
			}
			if g.Gid > size {
				size = g.Gid
			}
		}
	}

	return uint32(size)
}

// getMaxSizeFromImage returns the maximum ID used by the specified image.
// The layer stores must be already locked.
func (s *store) getMaxSizeFromImage(id string, image *Image, passwdFile, groupFile string) (uint32, error) {
	lstore, err := s.LayerStore()
	if err != nil {
		return 0, err
	}
	lstores, err := s.ROLayerStores()
	if err != nil {
		return 0, err
	}

	size := uint32(0)

	var topLayer *Layer
	layerName := image.TopLayer
outer:
	for {
		for _, ls := range append([]ROLayerStore{lstore}, lstores...) {
			layer, err := ls.Get(layerName)
			if err != nil {
				continue
			}
			if image.TopLayer == layerName {
				topLayer = layer
			}
			for _, uid := range layer.UIDs {
				if uid >= size {
					size = uid + 1
				}
			}
			for _, gid := range layer.GIDs {
				if gid >= size {
					size = gid + 1
				}
			}
			layerName = layer.Parent
			if layerName == "" {
				break outer
			}
			continue outer
		}
		return 0, errors.Errorf("cannot find layer %q", layerName)
	}

	rlstore, err := s.LayerStore()
	if err != nil {
		return 0, err
	}

	layerOptions := &LayerOptions{
		IDMappingOptions: IDMappingOptions{
			HostUIDMapping: true,
			HostGIDMapping: true,
			UIDMap:         nil,
			GIDMap:         nil,
		},
	}

	// We need to create a temporary layer so we can mount it and look up the
	// maximum IDs used.
	clayer, err := rlstore.Create(id, topLayer, nil, "", nil, layerOptions, false)
	if err != nil {
		return 0, err
	}
	defer rlstore.Delete(clayer.ID)

	mountOptions := drivers.MountOpts{
		MountLabel: "",
		UidMaps:    nil,
		GidMaps:    nil,
		Options:    nil,
	}

	mountpoint, err := rlstore.Mount(clayer.ID, mountOptions)
	if err != nil {
		return 0, err
	}
	defer rlstore.Unmount(clayer.ID, true)

	userFilesSize := parseMountedFiles(mountpoint, passwdFile, groupFile)
	if userFilesSize > size {
		size = userFilesSize
	}

	return size, nil
}

// subtractHostIDs returns the subtraction of the range USED from AVAIL. The range is specified
// by [HostID, HostID+Size).
// ContainerID is ignored.
func subtractHostIDs(avail idtools.IDMap, used idtools.IDMap) []idtools.IDMap {
	switch {
	case used.HostID <= avail.HostID && used.HostID+used.Size >= avail.HostID+avail.Size:
		return nil
	case used.HostID <= avail.HostID && used.HostID+used.Size > avail.HostID && used.HostID+used.Size < avail.HostID+avail.Size:
		newContainerID := used.HostID + used.Size
		newHostID := used.HostID + used.Size
		r := idtools.IDMap{
			ContainerID: newContainerID,
			HostID:      newHostID,
			Size:        avail.Size + avail.HostID - newHostID,
		}
		return []idtools.IDMap{r}
	case used.HostID > avail.HostID && used.HostID < avail.HostID+avail.Size && used.HostID+used.Size >= avail.HostID+avail.Size:
		r := idtools.IDMap{
			ContainerID: avail.ContainerID,
			HostID:      avail.HostID,
			Size:        used.HostID - avail.HostID,
		}
		return []idtools.IDMap{r}
	case used.HostID > avail.HostID && used.HostID < avail.HostID+avail.Size && used.HostID+used.Size < avail.HostID+avail.Size:
		r1 := idtools.IDMap{
			ContainerID: avail.ContainerID,
			HostID:      avail.HostID,
			Size:        used.HostID - avail.HostID,
		}
		r2 := idtools.IDMap{
			ContainerID: used.ContainerID + used.Size,
			HostID:      used.HostID + used.Size,
			Size:        avail.HostID + avail.Size - used.HostID - used.Size,
		}
		return []idtools.IDMap{r1, r2}
	default:
		r := idtools.IDMap{
			ContainerID: 0,
			HostID:      avail.HostID,
			Size:        avail.Size,
		}
		return []idtools.IDMap{r}
	}
}

// subtractContainerIDs returns the subtraction of the range USED from AVAIL. The range is specified
// by [ContainerID, ContainerID+Size).
// HostID is ignored.
func subtractContainerIDs(avail idtools.IDMap, used idtools.IDMap) []idtools.IDMap {
	switch {
	case used.ContainerID <= avail.ContainerID && used.ContainerID+used.Size >= avail.ContainerID+avail.Size:
		return nil
	case used.ContainerID <= avail.ContainerID && used.ContainerID+used.Size > avail.ContainerID && used.ContainerID+used.Size < avail.ContainerID+avail.Size:
		newContainerID := used.ContainerID + used.Size
		newHostID := used.HostID + used.Size
		r := idtools.IDMap{
			ContainerID: newContainerID,
			HostID:      newHostID,
			Size:        avail.Size + avail.ContainerID - newContainerID,
		}
		return []idtools.IDMap{r}
	case used.ContainerID > avail.ContainerID && used.ContainerID < avail.ContainerID+avail.Size && used.ContainerID+used.Size >= avail.ContainerID+avail.Size:
		r := idtools.IDMap{
			ContainerID: avail.ContainerID,
			HostID:      avail.HostID,
			Size:        used.ContainerID - avail.ContainerID,
		}
		return []idtools.IDMap{r}
	case used.ContainerID > avail.ContainerID && used.ContainerID < avail.ContainerID+avail.Size && used.ContainerID+used.Size < avail.ContainerID+avail.Size:
		r1 := idtools.IDMap{
			ContainerID: avail.ContainerID,
			HostID:      avail.HostID,
			Size:        used.ContainerID - avail.ContainerID,
		}
		r2 := idtools.IDMap{
			ContainerID: used.ContainerID + used.Size,
			HostID:      used.HostID + used.Size,
			Size:        avail.ContainerID + avail.Size - used.ContainerID - used.Size,
		}
		return []idtools.IDMap{r1, r2}
	default:
		r := idtools.IDMap{
			ContainerID: avail.ContainerID,
			HostID:      avail.HostID,
			Size:        avail.Size,
		}
		return []idtools.IDMap{r}
	}
}

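A hedged worked example of the splitting behaviour of these two helpers, using invented numbers: subtracting a used range that sits in the middle of an available one yields the two leftover pieces on either side.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Available host range and a used range that falls in its middle.
	avail := idtools.IDMap{ContainerID: 0, HostID: 100000, Size: 65536}
	used := idtools.IDMap{ContainerID: 0, HostID: 110000, Size: 2000}

	// subtractHostIDs(avail, used) hits its "middle overlap" case and returns
	// the two leftover pieces on either side of the used range:
	left := idtools.IDMap{ContainerID: avail.ContainerID, HostID: 100000, Size: 10000}
	right := idtools.IDMap{ContainerID: used.ContainerID + used.Size, HostID: 112000, Size: 53536}

	// Per the "ContainerID will be adjusted later" comment in findAvailableIDRange,
	// the ContainerID fields here are provisional.
	fmt.Println(left, right)
}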
// subtractAll subtracts all usedIDs from the available IDs.
func subtractAll(availableIDs, usedIDs []idtools.IDMap, host bool) []idtools.IDMap {
	for _, u := range usedIDs {
		for i := 0; i < len(availableIDs); {
			var prev []idtools.IDMap
			if i > 0 {
				prev = availableIDs[:i]
			}
			next := availableIDs[i+1:]
			cur := availableIDs[i]
			var newRanges []idtools.IDMap
			if host {
				newRanges = subtractHostIDs(cur, u)
			} else {
				newRanges = subtractContainerIDs(cur, u)
			}
			availableIDs = append(append(prev, newRanges...), next...)
			i += len(newRanges)
		}
	}
	return availableIDs
}

// findAvailableIDRange returns the list of IDs that are not used by existing containers.
// This function is used to look up both UIDs and GIDs.
func findAvailableIDRange(size uint32, availableIDs, usedIDs []idtools.IDMap) ([]idtools.IDMap, error) {
	var avail []idtools.IDMap

	// ContainerID will be adjusted later.
	for _, i := range availableIDs {
		n := idtools.IDMap{
			ContainerID: 0,
			HostID:      i.HostID,
			Size:        i.Size,
		}
		avail = append(avail, n)
	}
	avail = subtractAll(avail, usedIDs, true)

	currentID := 0
	remaining := size
	// We know the size of each interval, so let's adjust the ContainerID for each
	// of them.
	for i := 0; i < len(avail); i++ {
		avail[i].ContainerID = currentID
		if uint32(avail[i].Size) >= remaining {
			avail[i].Size = int(remaining)
			return avail[:i+1], nil
		}
		remaining -= uint32(avail[i].Size)
	}

	return nil, errors.New("could not find enough available IDs")
}

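A hedged numeric walk-through of the sizing loop above, with invented host ranges: a request for 3000 IDs when the free ranges are 100000-101999 and 120000-129999 consumes the first range whole and truncates the second.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Free host ranges left after subtractAll, and a request for 3000 IDs.
	avail := []idtools.IDMap{
		{HostID: 100000, Size: 2000},
		{HostID: 120000, Size: 10000},
	}
	remaining := uint32(3000)

	// Simplified restatement of the sizing loop in findAvailableIDRange:
	// earlier ranges are consumed whole, the last one is truncated.
	var picked []idtools.IDMap
	for _, r := range avail {
		if uint32(r.Size) >= remaining {
			r.Size = int(remaining)
			picked = append(picked, r)
			break
		}
		remaining -= uint32(r.Size)
		picked = append(picked, r)
	}
	fmt.Println(picked) // host IDs 100000-101999 plus 120000-120999
}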
// findAvailableRange returns both the list of UIDs and GIDs ranges that are not
// currently used by other containers.
// It is a wrapper for findAvailableIDRange.
func findAvailableRange(sizeUID, sizeGID uint32, availableUIDs, availableGIDs, usedUIDs, usedGIDs []idtools.IDMap) ([]idtools.IDMap, []idtools.IDMap, error) {
	UIDMap, err := findAvailableIDRange(sizeUID, availableUIDs, usedUIDs)
	if err != nil {
		return nil, nil, err
	}

	GIDMap, err := findAvailableIDRange(sizeGID, availableGIDs, usedGIDs)
	if err != nil {
		return nil, nil, err
	}

	return UIDMap, GIDMap, nil
}

// getAutoUserNS creates an automatic user namespace
func (s *store) getAutoUserNS(id string, options *AutoUserNsOptions, image *Image) ([]idtools.IDMap, []idtools.IDMap, error) {
	requestedSize := uint32(0)
	initialSize := uint32(1)
	if options.Size > 0 {
		requestedSize = options.Size
	}
	if options.InitialSize > 0 {
		initialSize = options.InitialSize
	}

	availableUIDs, availableGIDs, err := s.getAvailableMappings()
	if err != nil {
		return nil, nil, errors.Wrapf(err, "cannot read mappings")
	}

	// Look at every container that is using a user namespace and store
	// the intervals that are already used.
	containers, err := s.Containers()
	if err != nil {
		return nil, nil, err
	}
	var usedUIDs, usedGIDs []idtools.IDMap
	for _, c := range containers {
		usedUIDs = append(usedUIDs, c.UIDMap...)
		usedGIDs = append(usedGIDs, c.GIDMap...)
	}

	size := requestedSize

	// If there is no requestedSize, look up the maximum IDs used in the layers
	// metadata. Make sure the size is at least s.autoNsMinSize and it is not
	// bigger than s.autoNsMaxSize.
	// This is a best-effort heuristic.
	if requestedSize == 0 {
		size = initialSize
		if s.autoNsMinSize > size {
			size = s.autoNsMinSize
		}
		if image != nil {
			sizeFromImage, err := s.getMaxSizeFromImage(id, image, options.PasswdFile, options.GroupFile)
			if err != nil {
				return nil, nil, err
			}
			if sizeFromImage > size {
				size = sizeFromImage
			}
		}
		if s.autoNsMaxSize > 0 && size > s.autoNsMaxSize {
			return nil, nil, errors.Errorf("the container needs a user namespace with size %d that is bigger than the maximum value allowed with userns=auto %d", size, s.autoNsMaxSize)
		}
	}
	// Make sure the specified additional IDs are not used as part of the automatic
	// mapping
	usedUIDs = append(usedUIDs, options.AdditionalUIDMappings...)
	usedGIDs = append(usedGIDs, options.AdditionalGIDMappings...)
	availableUIDs, availableGIDs, err = findAvailableRange(size, size, availableUIDs, availableGIDs, usedUIDs, usedGIDs)
	if err != nil {
		return nil, nil, err
	}

	// We need to make sure the specified container IDs are also dropped from the automatic
	// namespaces we have found.
	if len(options.AdditionalUIDMappings) > 0 {
		availableUIDs = subtractAll(availableUIDs, options.AdditionalUIDMappings, false)
	}
	if len(options.AdditionalGIDMappings) > 0 {
		availableGIDs = subtractAll(availableGIDs, options.AdditionalGIDMappings, false)
	}

	return append(availableUIDs, options.AdditionalUIDMappings...), append(availableGIDs, options.AdditionalGIDMappings...), nil
}
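Once a container has been created with AutoUserNs (see the CreateContainer sketch earlier), the ranges picked by getAutoUserNS are recorded on the container itself. A hedged sketch of reading them back through the public API; the container ID is illustrative:

// Illustrative only: inspect the mappings allocated for a container created
// with AutoUserNs.
func printAutoMappings(store storage.Store, containerID string) error {
	ctr, err := store.Container(containerID)
	if err != nil {
		return err
	}
	for _, m := range ctr.UIDMap {
		fmt.Printf("container UID %d -> host UID %d (size %d)\n", m.ContainerID, m.HostID, m.Size)
	}
	for _, m := range ctr.GIDMap {
		fmt.Printf("container GID %d -> host GID %d (size %d)\n", m.ContainerID, m.HostID, m.Size)
	}
	return nil
}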