Mirror of https://github.com/mudler/luet.git, synced 2025-09-02 07:45:02 +00:00

Compare commits (32 commits):

77b49d9c4a, 4c3532e3c6, f2ec065a89, 7193ea03f9, beeb0dcaaa, 0de3177ddd, 45c8dfa19f, 186ac33156,
bdc24b84a4, e5d6d21178, 0379855592, 958b8c32e1, b0b95d1721, f85891e362, 946524f90d, 2cbd97ff3a,
a4d77f8f99, adcb459fd2, 55ae67be0f, 848215eef0, 7bfff97f57, a73f5f9b65, 0288eedbc3, b27237b7ff,
c9aed37fa7, 788b889d14, ef92f23221, 562fcc2421, 54be45dcff, 413572a8e3, ecc41ce370, dd6501a642
@@ -111,6 +111,8 @@ Build packages specifying multiple definition trees:
onlydeps := viper.GetBool("onlydeps")
onlyTarget, _ := cmd.Flags().GetBool("only-target-package")
full, _ := cmd.Flags().GetBool("full")
rebuild, _ := cmd.Flags().GetBool("rebuild")

concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
var results Results
backendArgs := viper.GetStringSlice("backend-args")

@@ -176,6 +178,7 @@ Build packages specifying multiple definition trees:
options.WithBuildValues(values),
options.WithPullRepositories(pullRepo),
options.WithPushRepository(imageRepository),
options.Rebuild(rebuild),
options.WithSolverOptions(*opts),
options.Wait(wait),
options.OnlyTarget(onlyTarget),

@@ -329,7 +332,7 @@ func init() {
buildCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
buildCmd.Flags().Bool("live-output", LuetCfg.GetGeneral().ShowBuildOutput, "Enable live output of the build phase.")
buildCmd.Flags().Bool("from-repositories", false, "Consume the user-defined repositories to pull specfiles from")

buildCmd.Flags().Bool("rebuild", false, "To combine with --pull. Allows to rebuild the target package even if an image is available, against a local values file")
buildCmd.Flags().Bool("pretend", false, "Just print what packages will be compiled")
buildCmd.Flags().StringArrayP("pull-repository", "p", []string{}, "A list of repositories to pull the cache from")
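A minimal sketch (not part of the diff) of how the flags above could feed the compiler's functional options. The flag names and the options.* constructors are the ones visible in these hunks; the surrounding cobra command context and error handling are assumed.

    // Sketch: read the build-related flags inside the command's Run function
    // and turn them into functional options for the compiler.
    rebuild, _ := cmd.Flags().GetBool("rebuild")
    onlyTarget, _ := cmd.Flags().GetBool("only-target-package")
    pullRepo, _ := cmd.Flags().GetStringArray("pull-repository")

    compilerOpts := []func(cfg *options.Compiler) error{
        options.Rebuild(rebuild),
        options.OnlyTarget(onlyTarget),
        options.WithPullRepositories(pullRepo),
    }
    _ = compilerOpts // handed to the compiler constructor elsewhere in build.go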
@@ -40,7 +40,7 @@ var Verbose bool
var LockedCommands = []string{"install", "uninstall", "upgrade"}

const (
LuetCLIVersion = "0.13.0"
LuetCLIVersion = "0.14.6"
LuetEnvPrefix = "LUET"
)
go.mod (3 changed lines)

@@ -16,7 +16,6 @@ require (
github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
github.com/docker/go-units v0.4.0
github.com/ecooper/qlearning v0.0.0-20160612200101-3075011a69fd
github.com/fsouza/go-dockerclient v1.6.4
github.com/genuinetools/img v0.5.11
github.com/ghodss/yaml v1.0.0
github.com/google/go-containerregistry v0.2.1

@@ -37,7 +36,7 @@ require (
github.com/moby/sys/mount v0.2.0 // indirect
github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d
github.com/mudler/docker-companion v0.4.6-0.20200418093252-41846f112d87
github.com/mudler/go-pluggable v0.0.0-20201113184918-d36448fc8f82
github.com/mudler/go-pluggable v0.0.0-20210513155700-54c6443073af
github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290
github.com/onsi/ginkgo v1.14.2
github.com/onsi/gomega v1.10.3
go.sum (29 changed lines)

@@ -91,13 +91,12 @@ github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Sabayon/pkgs-checker v0.7.2 h1:mh53u5D7FTCeBJevYQA9cCxAWGTSuKqw7m/x7GsQVb0=
github.com/Sabayon/pkgs-checker v0.7.2/go.mod h1:GFGM6ZzSE5owdGgjLnulj0+Vt9UTd5LFGmB2AOVPYrE=
github.com/Sabayon/pkgs-checker v0.8.1 h1:pVen975z9WIecq7luntUn+0XzGdiyz2CsDay8w+ZmOw=
github.com/Sabayon/pkgs-checker v0.8.1/go.mod h1:GC9PBUzcq0QVEBGRA1IIMXf6wHxo34KH5BeqoyJsLpo=
github.com/Sereal/Sereal v0.0.0-20181211220259-509a78ddbda3 h1:Xu7z47ZiE/J+sKXHZMGxEor/oY2q6dq51fkO0JqdSwY=
github.com/Sereal/Sereal v0.0.0-20181211220259-509a78ddbda3/go.mod h1:D0JMgToj/WdxCgd30Kc1UcA9E+WdZoJqeVOuYW7iTBM=
github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=

@@ -146,18 +145,24 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0=
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/briandowns/spinner v1.12.1-0.20201220203425-e201aaea0a31 h1:yInAg9pE5qGec5eQ7XdfOTTaGwGxD3bKFVjmD6VKkwc=
github.com/briandowns/spinner v1.12.1-0.20201220203425-e201aaea0a31/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0 h1:s7+5BfS4WFJoVF9pnB8kBk03S7pZXRdKamnV0FOl5Sc=
github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec h1:4XvMn0XuV7qxCH22gbnR79r+xTUaLOSA0GW/egpO3SQ=

@@ -177,13 +182,12 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ=
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/cgroups v0.0.0-20200217135630-d732e370d46d h1:UKAt78F1OvM4ceTn1VvXuYuatXohsFU1eSI2IBtTw9g=

@@ -255,6 +259,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As=
github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73 h1:OGNva6WhsKst5OZf7eZOklDztV3hwtTHovdrLHV+MsA=
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=

@@ -315,6 +320,7 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=

@@ -400,6 +406,7 @@ github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85n
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=

@@ -432,6 +439,7 @@ github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=

@@ -470,6 +478,7 @@ github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkY
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 h1:jc2UWq7CbdszqeH6qu1ougXMIUBfSy8Pbh/anURYbGI=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=

@@ -532,6 +541,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=

@@ -591,8 +601,11 @@ github.com/jedib0t/go-pretty/v6 v6.0.5/go.mod h1:MTr6FgcfNdnN5wPVBzJ6mhJeDyiF0yB
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3 h1:sHsPfNMAG70QAvKbddQ0uScZCHQoZsT5NykGRCeeeIs=
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s=
github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8 h1:CZkYfurY6KGhVtlalI4QwQ6T0Cu6iuY3e0x5RLu96WE=
github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo=
github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d h1:jRQLvyVGL+iVtDElaEIDdKwpPqUIZJfzkNLV34htpEc=
github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.1 h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E=
github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=

@@ -613,6 +626,7 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI=
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=

@@ -649,6 +663,7 @@ github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6Fm
github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY=
github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=

@@ -743,8 +758,8 @@ github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d h1:fKh+rvw
github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d/go.mod h1:puRUWSwyecW2V355tKncwPVPRAjQBduPsFjG0mrV/Nw=
github.com/mudler/docker-companion v0.4.6-0.20200418093252-41846f112d87 h1:mGz7T8KvmHH0gLWPI5tQne8xl2cO3T8wrrb6Aa16Jxo=
github.com/mudler/docker-companion v0.4.6-0.20200418093252-41846f112d87/go.mod h1:1w4zI1LYXDeiUXqedPcrT5eQJnmKR6dbg5iJMgSIP/Y=
github.com/mudler/go-pluggable v0.0.0-20201113184918-d36448fc8f82 h1:Hkefw2tzoKATVUTFsCtDlUnY180+OE851qGbq45ATxk=
github.com/mudler/go-pluggable v0.0.0-20201113184918-d36448fc8f82/go.mod h1:4P/ULate+2QxoAQtojaRjyO5VGMhV0KLnSdAS8nuBbo=
github.com/mudler/go-pluggable v0.0.0-20210513155700-54c6443073af h1:jixIxEgLSqu24eMiyzfCI+roa5IaOUhF546ePSFyHeY=
github.com/mudler/go-pluggable v0.0.0-20210513155700-54c6443073af/go.mod h1:WmKcT8ONmhDQIqQ+HxU+tkGWjzBEyY/KFO8LTGCu4AI=
github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290 h1:426hFyXMpXeqIeGJn2cGAW9ogvM2Jf+Jv23gtVPvBLM=
github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290/go.mod h1:uP5BBgFxq2wNWo7n1vnY5SSbgL0WDshVJrOO12tZ/lA=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=

@@ -1450,6 +1465,7 @@ google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEG
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII=
gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

@@ -1472,6 +1488,7 @@ gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1 h1:d4KQkxAaAiRY2h5Zqis161Pv91A37uZyJOx73duwUwM=
gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
@@ -25,7 +25,6 @@ import (
bus "github.com/mudler/luet/pkg/bus"

docker "github.com/fsouza/go-dockerclient"
capi "github.com/mudler/docker-companion/api"

"github.com/mudler/luet/pkg/helpers"

@@ -56,24 +55,6 @@ func (*SimpleDocker) BuildImage(opts Options) error {
Info(":whale: Building image " + name + " done")

if os.Getenv("DOCKER_SQUASH") == "true" {
Info(":whale: Squashing image " + name)
var client *docker.Client

Spinner(22)
defer SpinnerStop()
client, err = docker.NewClientFromEnv()
if err != nil {
return errors.Wrap(err, "could not connect to the Docker daemon")
}
err = capi.Squash(client, name, name)
if err != nil {
return errors.Wrap(err, "Failed squashing image")
}

Info(":whale: Squashing image " + name + " done")
}

bus.Manager.Publish(bus.EventImagePostBuild, opts)

return nil
@@ -295,6 +295,17 @@ func (cs *LuetCompiler) unpackDelta(concurrency int, keepPermissions bool, p *co
return artifact, nil
}

func (cs *LuetCompiler) genBuilderImageTag(p *compilerspec.LuetCompilationSpec, packageImage string) string {
// Use packageImage as salt into the fp being used
// so the hash is unique also in cases where
// some package deps does have completely different
// depgraphs
// TODO: We should use the image tag, or pass by the package assertion hash which is unique
// and identifies the deptree of the package.
return fmt.Sprintf("builder-%s", p.GetPackage().HashFingerprint(helpers.StripRegistryFromImage(packageImage)))
}
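An illustrative, self-contained sketch of the salting idea the comment above describes: the builder tag changes whenever the target package image changes, so two packages with different dependency graphs never share a builder tag. HashFingerprint and StripRegistryFromImage are the real helpers used in the hunk; the sha256 below is only a stand-in to make the example runnable.

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    // builderTag mimics genBuilderImageTag: the package fingerprint is salted
    // with the package image so the resulting tag is unique per dependency graph.
    func builderTag(pkgFingerprint, packageImage string) string {
        sum := sha256.Sum256([]byte(pkgFingerprint + packageImage))
        return fmt.Sprintf("builder-%x", sum[:8])
    }

    func main() {
        fmt.Println(builderTag("cat/foo-1.0", "repo.example.org/cache:aaaa"))
        fmt.Println(builderTag("cat/foo-1.0", "repo.example.org/cache:bbbb")) // different tag
    }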
func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImage string,
concurrency int, keepPermissions bool,
p *compilerspec.LuetCompilationSpec) (backend.Options, backend.Options, error) {

@@ -303,20 +314,6 @@ func (cs *LuetCompiler) buildPackageImag
pkgTag := ":package: " + p.GetPackage().HumanReadableString()

// Use packageImage as salt into the fp being used
// so the hash is unique also in cases where
// some package deps does have completely different
// depgraphs
// TODO: We should use the image tag, or pass by the package assertion hash which is unique
// and identifies the deptree of the package.

fp := p.GetPackage().HashFingerprint(helpers.StripRegistryFromImage(packageImage))

if buildertaggedImage == "" {
buildertaggedImage = cs.Options.PushImageRepository + ":builder-" + fp
Debug(pkgTag, "Creating intermediary image", buildertaggedImage, "from", image)
}

// TODO: Cleanup, not actually hit
if packageImage == "" {
return runnerOpts, builderOpts, errors.New("no package image given")
@@ -354,9 +351,15 @@ func (cs *LuetCompiler) buildPackageImag
return builderOpts, runnerOpts, errors.Wrap(err, "Could not generate image definition")
}

if len(p.GetPreBuildSteps()) == 0 {
buildertaggedImage = image
}
// Even if we don't have prelude steps, we want to push
// An intermediate image to tag images which are outside of the tree.
// Those don't have an hash otherwise, and thus makes build unreproducible
// see SKIPBUILD for the other logic
// if len(p.GetPreBuildSteps()) == 0 {
// buildertaggedImage = image
// }
// We might want to skip this phase but replacing with a tag that we push. But in case
// steps in prelude are == 0 those are equivalent.

// Then we write the step image, which uses the builder one
if err := p.WriteStepImageDefinition(buildertaggedImage, filepath.Join(buildDir, p.GetPackage().GetFingerPrint()+".dockerfile")); err != nil {
@@ -401,12 +404,13 @@ func (cs *LuetCompiler) buildPackageImag
}
return nil
}
if len(p.GetPreBuildSteps()) != 0 {
Info(pkgTag, ":whale: Generating 'builder' image from", image, "as", buildertaggedImage, "with prelude steps")
if err := buildAndPush(builderOpts); err != nil {
return builderOpts, runnerOpts, errors.Wrapf(err, "Could not push image: %s %s", image, builderOpts.DockerFileName)
}
// SKIPBUILD
// if len(p.GetPreBuildSteps()) != 0 {
Info(pkgTag, ":whale: Generating 'builder' image from", image, "as", buildertaggedImage, "with prelude steps")
if err := buildAndPush(builderOpts); err != nil {
return builderOpts, runnerOpts, errors.Wrapf(err, "Could not push image: %s %s", image, builderOpts.DockerFileName)
}
//}

// Even if we might not have any steps to build, we do that so we can tag the image used in this moment and use that to cache it in a registry, or in the system.
// acting as a docker tag.
@@ -425,7 +429,7 @@ func (cs *LuetCompiler) genArtifact(p *compilerspec.LuetCompilationSpec, builder
var rootfs string
var err error
pkgTag := ":package: " + p.GetPackage().HumanReadableString()

Debug(pkgTag, "Generating artifact")
// We can't generate delta in this case. It implies the package is a virtual, and nothing has to be done really
if p.EmptyPackage() {
fakePackage := p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar")

@@ -470,7 +474,7 @@ func (cs *LuetCompiler) genArtifact(p *compilerspec.LuetCompilationSpec, builder
filelist, err := a.FileList()
if err != nil {
return a, errors.Wrap(err, "Failed getting package list")
return a, errors.Wrapf(err, "Failed getting package list for '%s' '%s'", a.Path, a.CompileSpec.Package.HumanReadableString())
}

a.Files = filelist
@@ -518,10 +522,11 @@ func oneOfImagesAvailable(images []string, b CompilerBackend) (bool, string) {
return false, ""
}

func (cs *LuetCompiler) resolveExistingImageHash(imageHash string) string {
func (cs *LuetCompiler) resolveExistingImageHash(imageHash string, p *compilerspec.LuetCompilationSpec) string {
var resolvedImage string
Debug("Resolving image hash for", p.Package.HumanReadableString(), "hash", imageHash, "Pull repositories", p.BuildOptions.PullImageRepository)
toChecklist := append([]string{fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, imageHash)},
genImageList(cs.Options.PullImageRepository, imageHash)...)
genImageList(p.BuildOptions.PullImageRepository, imageHash)...)
if exists, which := oneOfImagesExists(toChecklist, cs.Backend); exists {
resolvedImage = which
}
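The resolution above walks a checklist of candidate references built from the push repository plus every pull repository taken from the spec's build options, all tagged with the same hash; this mirrors genImageList, which appears further down in this file. A small standalone sketch of that list construction, with example repository names:

    package main

    import "fmt"

    // candidateImages builds the same kind of checklist resolveExistingImageHash uses:
    // the push repository first, then each pull repository, all tagged with the hash.
    func candidateImages(pushRepo string, pullRepos []string, hash string) []string {
        list := []string{fmt.Sprintf("%s:%s", pushRepo, hash)}
        for _, r := range pullRepos {
            list = append(list, fmt.Sprintf("%s:%s", r, hash))
        }
        return list
    }

    func main() {
        fmt.Println(candidateImages("quay.io/org/cache", []string{"quay.io/org/extra"}, "abcdef"))
        // [quay.io/org/cache:abcdef quay.io/org/extra:abcdef]
    }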
@@ -555,9 +560,10 @@ func LoadArtifactFromYaml(spec *compilerspec.LuetCompilationSpec) (*artifact.Pac
func (cs *LuetCompiler) getImageArtifact(hash string, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
// we check if there is an available image with the given hash and
// we return a full artifact if can be loaded locally.
Debug("Get image artifact for", p.Package.HumanReadableString(), "hash", hash, "Pull repositories", p.BuildOptions.PullImageRepository)

toChecklist := append([]string{fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, hash)},
genImageList(cs.Options.PullImageRepository, hash)...)
genImageList(p.BuildOptions.PullImageRepository, hash)...)

exists, _ := oneOfImagesExists(toChecklist, cs.Backend)
if art, err := LoadArtifactFromYaml(p); err == nil && exists { // If YAML is correctly loaded, and both images exists, no reason to rebuild.
@@ -578,7 +584,7 @@ func (cs *LuetCompiler) getImageArtifact(hash string, p *compilerspec.LuetCompil
// image buildertaggedImage.
// Images that can be resolved from repositories are prefered over the local ones if PullFirst is set to true
// avoiding to rebuild images as much as possible
func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage string, packageTagHash string,
func (cs *LuetCompiler) compileWithImage(image, builderHash string, packageTagHash string,
concurrency int,
keepPermissions, keepImg bool,
p *compilerspec.LuetCompilationSpec, generateArtifact bool) (*artifact.PackageArtifact, error) {
@@ -591,17 +597,40 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage string, packa
}

if !generateArtifact {
// try to avoid regenerating the image if possible by checking the hash in the
// given repositories
// It is best effort. If we fail resolving, we will generate the images and keep going
if art, err := cs.getImageArtifact(packageTagHash, p); err == nil {
// try to avoid regenerating the image if possible by checking the hash in the
// given repositories
// It is best effort. If we fail resolving, we will generate the images and keep going
return art, nil
}
}

// always going to point at the destination from the repo defined
packageImage := fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, packageTagHash)
builderOpts, runnerOpts, err := cs.buildPackageImage(image, buildertaggedImage, packageImage, concurrency, keepPermissions, p)
remoteBuildertaggedImage := fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, builderHash)
builderResolved := cs.resolveExistingImageHash(builderHash, p)
//generated := false
// if buildertaggedImage == "" {
// buildertaggedImage = fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, buildertaggedImage)
// generated = true
// // Debug(pkgTag, "Creating intermediary image", buildertaggedImage, "from", image)
// }

if cs.Options.PullFirst && !cs.Options.Rebuild {
Debug("Checking if an image is already available")
// FIXUP here. If packageimage hash exists and pull is true, generate package
resolved := cs.resolveExistingImageHash(packageTagHash, p)

//
if resolved != packageImage && remoteBuildertaggedImage != builderResolved { // an image is there already
Debug("Images available for", p.Package.HumanReadableString(), "generating artifact from remote images:", resolved)
return cs.genArtifact(p, backend.Options{ImageName: builderResolved}, backend.Options{ImageName: resolved}, concurrency, keepPermissions)
} else {
Debug("Images not available for", p.Package.HumanReadableString())
}
}

// always going to point at the destination from the repo defined
builderOpts, runnerOpts, err := cs.buildPackageImage(image, builderResolved, packageImage, concurrency, keepPermissions, p)
if err != nil {
return nil, errors.Wrap(err, "failed building package image")
}
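Restating the reuse check above as a tiny predicate (a sketch, not part of the project's API): the build is skipped and the artifact is generated from remote images only when both the package image and the builder image resolve to something other than the local push targets.

    // canReuse mirrors the condition in compileWithImage: both the resolved package
    // image and the resolved builder image must differ from the would-be push targets.
    func canReuse(resolvedPackage, packageImage, resolvedBuilder, remoteBuilder string) bool {
        return resolvedPackage != packageImage && resolvedBuilder != remoteBuilder
    }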
@@ -698,6 +727,7 @@ func (cs *LuetCompiler) ComputeDepTree(p *compilerspec.LuetCompilationSpec) (sol
BuildHash: nthsolution.HashFrom(assertion.Package),
PackageHash: nthsolution.AssertionHash(),
}
assertion.Package.SetTreeDir(p.Package.GetTreeDir())
assertions = append(assertions, assertion)
}
}

@@ -710,7 +740,7 @@ func (cs *LuetCompiler) ComputeDepTree(p *compilerspec.LuetCompilationSpec) (sol
func (cs *LuetCompiler) Compile(keepPermissions bool, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
asserts, err := cs.ComputeDepTree(p)
if err != nil {
panic(err)
return nil, err
}
p.SetSourceAssertion(asserts)
return cs.compile(cs.Options.Concurrency, keepPermissions, p)
@@ -724,13 +754,30 @@ func genImageList(refs []string, hash string) []string {
return res
}

func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
if len(p.BuildOptions.PullImageRepository) != 0 {
orig := cs.Options.PullImageRepository
cs.Options.PullImageRepository = append(orig, p.BuildOptions.PullImageRepository...)
defer func() { cs.Options.PullImageRepository = orig }()
func (cs *LuetCompiler) inheritSpecBuildOptions(p *compilerspec.LuetCompilationSpec) {
Debug(p.GetPackage().HumanReadableString(), "Build options before inherit", p.BuildOptions)

// Append push repositories from buildpsec buildoptions as pull if found.
// This allows to resolve the hash automatically if we pulled the metadata from
// repositories that are advertizing their cache.
if len(p.BuildOptions.PushImageRepository) != 0 {
p.BuildOptions.PullImageRepository = append(p.BuildOptions.PullImageRepository, p.BuildOptions.PushImageRepository)
Debug("Inheriting pull repository from PushImageRepository buildoptions", p.BuildOptions.PullImageRepository)
}

if len(cs.Options.PullImageRepository) != 0 {
p.BuildOptions.PullImageRepository = append(p.BuildOptions.PullImageRepository, cs.Options.PullImageRepository...)
Debug("Inheriting pull repository from PullImageRepository buildoptions", p.BuildOptions.PullImageRepository)
}
Debug(p.GetPackage().HumanReadableString(), "Build options after inherit", p.BuildOptions)
}

func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
// TODO: Racy, remove it
// Inherit build options from compilation specs metadata
// orig := cs.Options.PullImageRepository
// defer func() { cs.Options.PullImageRepository = orig }()

Info(":package: Compiling", p.GetPackage().HumanReadableString(), ".... :coffee:")

Debug(fmt.Sprintf("%s: has images %t, empty package: %t", p.GetPackage().HumanReadableString(), p.HasImageSource(), p.EmptyPackage()))
@@ -753,12 +800,15 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compil
})

// Update compilespec build options - it will be then serialized into the compilation metadata file
p.SetBuildOptions(cs.Options)
//p.SetBuildOptions(cs.Options)
p.BuildOptions.PushImageRepository = cs.Options.PushImageRepository
//p.BuildOptions.BuildValues = cs.Options.BuildValues
//p.BuildOptions.BuildValuesFile = cs.Options.BuildValuesFile

// - If image is set we just generate a plain dockerfile
// Treat last case (easier) first. The image is provided and we just compute a plain dockerfile with the images listed as above
if p.GetImage() != "" {
return cs.compileWithImage(p.GetImage(), "", targetAssertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, p, true)
return cs.compileWithImage(p.GetImage(), cs.genBuilderImageTag(p, targetAssertion.Hash.PackageHash), targetAssertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, p, true)
}

// - If image is not set, we read a base_image. Then we will build one image from it to kick-off our build based
@@ -773,7 +823,10 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compil
currentN := 0

packageDeps := !cs.Options.PackageTargetOnly
if !cs.Options.NoDeps {
buildDeps := !cs.Options.NoDeps
buildTarget := !cs.Options.OnlyDeps

if buildDeps {
Info(":deciduous_tree: Build dependencies for " + p.GetPackage().HumanReadableString())
for _, assertion := range dependencies { //highly dependent on the order
depsN++

@@ -788,6 +841,9 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compil
if err != nil {
return nil, errors.Wrap(err, "Error while generating compilespec for "+assertion.Package.GetName())
}
compileSpec.BuildOptions.PullImageRepository = append(compileSpec.BuildOptions.PullImageRepository, p.BuildOptions.PullImageRepository...)
Debug("PullImage repos:", compileSpec.BuildOptions.PullImageRepository)

compileSpec.SetOutputPath(p.GetOutputPath())
Debug(pkgTag, " :arrow_right_hook: :whale: Builder image from hash", assertion.Hash.BuildHash)
Debug(pkgTag, " :arrow_right_hook: :whale: Package image from hash", assertion.Hash.PackageHash)
@@ -804,11 +860,11 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compil
// for the source instead, pick an image and a buildertaggedImage from hashes if they exists.
// otherways fallback to the pushed repo
// Resolve images from the hashtree
resolvedBuildImage := cs.resolveExistingImageHash(assertion.Hash.BuildHash)
resolvedBuildImage := cs.resolveExistingImageHash(assertion.Hash.BuildHash, compileSpec)
if compileSpec.GetImage() != "" {
Debug(pkgTag, " :wrench: Compiling "+compileSpec.GetPackage().HumanReadableString()+" from image")

a, err := cs.compileWithImage(compileSpec.GetImage(), resolvedBuildImage, assertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, compileSpec, packageDeps)
a, err := cs.compileWithImage(compileSpec.GetImage(), assertion.Hash.BuildHash, assertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, compileSpec, packageDeps)
if err != nil {
return nil, errors.Wrap(err, "Failed compiling "+compileSpec.GetPackage().HumanReadableString())
}

@@ -818,7 +874,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compil
}

Debug(pkgTag, " :wrench: Compiling "+compileSpec.GetPackage().HumanReadableString()+" from tree")
a, err := cs.compileWithImage(resolvedBuildImage, "", assertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, compileSpec, packageDeps)
a, err := cs.compileWithImage(resolvedBuildImage, cs.genBuilderImageTag(compileSpec, targetAssertion.Hash.PackageHash), assertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, compileSpec, packageDeps)
if err != nil {
return nil, errors.Wrap(err, "Failed compiling "+compileSpec.GetPackage().HumanReadableString())
}

@@ -839,11 +895,11 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compil
lastHash = dependencies[len(dependencies)-1].Hash.PackageHash
}

if !cs.Options.OnlyDeps {
resolvedBuildImage := cs.resolveExistingImageHash(lastHash)
if buildTarget {
resolvedBuildImage := cs.resolveExistingImageHash(lastHash, p)
Info(":rocket: All dependencies are satisfied, building package requested by the user", p.GetPackage().HumanReadableString())
Info(":package:", p.GetPackage().HumanReadableString(), " Using image: ", resolvedBuildImage)
a, err := cs.compileWithImage(resolvedBuildImage, "", targetAssertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, p, true)
a, err := cs.compileWithImage(resolvedBuildImage, cs.genBuilderImageTag(p, targetAssertion.Hash.PackageHash), targetAssertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, p, true)
if err != nil {
return a, err
}
@@ -866,18 +922,11 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compil

type templatedata map[string]interface{}

func (cs *LuetCompiler) templatePackage(vals []map[string]interface{}, pack pkg.Package) ([]byte, error) {
func (cs *LuetCompiler) templatePackage(vals []map[string]interface{}, pack pkg.Package, dst templatedata) ([]byte, error) {

var dataresult []byte
val := pack.Rel(DefinitionFile)

// Update processed build values
dst, err := helpers.UnMarshalValues(cs.Options.BuildValuesFile)
if err != nil {
return nil, errors.Wrap(err, "unmarshalling values")
}
cs.Options.BuildValues = append(vals, (map[string]interface{})(dst))

if _, err := os.Stat(pack.Rel(CollectionFile)); err == nil {
val = pack.Rel(CollectionFile)
@@ -956,7 +1005,7 @@ func (cs *LuetCompiler) FromPackage(p pkg.Package) (*compilerspec.LuetCompilatio

opts := options.Compiler{}

artifactMetadataFile := filepath.Join(p.GetTreeDir(), "..", p.GetMetadataFilePath())
artifactMetadataFile := filepath.Join(pack.GetTreeDir(), "..", pack.GetMetadataFilePath())
Debug("Checking if metadata file is present", artifactMetadataFile)
if _, err := os.Stat(artifactMetadataFile); err == nil {
f, err := os.Open(artifactMetadataFile)

@@ -972,17 +1021,24 @@ func (cs *LuetCompiler) FromPackage(p pkg.Package) (*compilerspec.LuetCompilatio
return nil, errors.Wrap(err, "could not decode package from yaml")
}

Debug("Read build options:", art.CompileSpec.BuildOptions)
opts = art.CompileSpec.BuildOptions
opts.PushImageRepository = ""

Debug("Read build options:", art.CompileSpec.BuildOptions, "from", artifactMetadataFile)
if art.CompileSpec.BuildOptions != nil {
opts = *art.CompileSpec.BuildOptions
}
} else if !os.IsNotExist(err) {
Debug("error reading already existing artifact metadata file: ", err.Error())
Debug("error reading artifact metadata file: ", err.Error())
} else if os.IsNotExist(err) {
Debug("metadata file not present, skipping", artifactMetadataFile)
}

bytes, err := cs.templatePackage(opts.BuildValues, pack)
// Update processed build values
dst, err := helpers.UnMarshalValues(cs.Options.BuildValuesFile)
if err != nil {
return nil, errors.Wrap(err, "unmarshalling values")
}
opts.BuildValues = append(opts.BuildValues, (map[string]interface{})(dst))

bytes, err := cs.templatePackage(opts.BuildValues, pack, templatedata(dst))
if err != nil {
return nil, errors.Wrap(err, "while rendering package template")
}

@@ -991,7 +1047,9 @@ func (cs *LuetCompiler) FromPackage(p pkg.Package) (*compilerspec.LuetCompilatio
if err != nil {
return nil, err
}
newSpec.BuildOptions = opts
newSpec.BuildOptions = &opts

cs.inheritSpecBuildOptions(newSpec)

return newSpec, err
}
@@ -19,6 +19,8 @@ import (
"archive/tar"
"bufio"
"bytes"
"crypto/sha1"
"encoding/base64"
"fmt"
"io"
"io/ioutil"

@@ -27,7 +29,6 @@ import (
"path/filepath"
"regexp"

system "github.com/docker/docker/pkg/system"
zstd "github.com/klauspost/compress/zstd"
gzip "github.com/klauspost/pgzip"

@@ -317,7 +318,6 @@ func (a *PackageArtifact) Compress(src string, concurrency int) error {
default:
return helpers.Tar(src, a.getCompressedName())
}
return errors.New("Compression type must be supplied")
}

func (a *PackageArtifact) getCompressedName() string {
@@ -340,6 +340,28 @@ func (a *PackageArtifact) GetUncompressedName() string {
return a.Path
}

func hashContent(bv []byte) string {
hasher := sha1.New()
hasher.Write(bv)
sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
return sha
}

func hashFileContent(path string) (string, error) {
f, err := os.Open(path)
if err != nil {
return "", err
}
defer f.Close()

h := sha1.New()
if _, err := io.Copy(h, f); err != nil {
return "", err
}

return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}

func tarModifierWrapperFunc(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
// If the destination path already exists I rename target file name with postfix.
var destPath string
@@ -351,6 +373,7 @@ func tarModifierWrapperFunc(dst, path string, header *tar.Header, content io.Rea
return nil, nil, err
}
}
tarHash := hashContent(buffer.Bytes())

// If file is not present on archive but is defined on mods
// I receive the callback. Prevent nil exception.

@@ -363,8 +386,21 @@ func tarModifierWrapperFunc(dst, path string, header *tar.Header, content io.Rea
return header, buffer.Bytes(), nil
}

existingHash := ""
f, err := os.Lstat(destPath)
if err == nil {
Debug("File exists already, computing hash for", destPath)
hash, herr := hashFileContent(destPath)
if herr == nil {
existingHash = hash
}
}

Debug("Existing file hash: ", existingHash, "Tar file hashsum: ", tarHash)
// We want to protect file only if the hash of the files are differing OR the file size are
differs := (existingHash != "" && existingHash != tarHash) || (err != nil && f != nil && header.Size != f.Size())
// Check if exists
if helpers.Exists(destPath) {
if helpers.Exists(destPath) && differs {
for i := 1; i < 1000; i++ {
name := filepath.Join(filepath.Join(filepath.Dir(path),
fmt.Sprintf("._cfg%04d_%s", i, filepath.Base(path))))
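The protection path above boils down to: hash the file already on disk, compare it with the hash of the incoming tar entry, and only when they differ write the new content next to the old file under a "._cfgNNNN_" prefix instead of overwriting it. A runnable sketch of the name generation, with example paths:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    // protectedName reproduces the naming scheme used above for protected config files.
    func protectedName(path string, i int) string {
        return filepath.Join(filepath.Dir(path), fmt.Sprintf("._cfg%04d_%s", i, filepath.Base(path)))
    }

    func main() {
        fmt.Println(protectedName("/etc/hosts", 1)) // /etc/._cfg0001_hosts
    }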
@@ -584,47 +620,16 @@ type CopyJob struct {
Artifact string
}

func copyXattr(srcPath, dstPath, attr string) error {
data, err := system.Lgetxattr(srcPath, attr)
if err != nil {
return err
}
if data != nil {
if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
return err
}
}
return nil
}

func doCopyXattrs(srcPath, dstPath string) error {
if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
return err
}

return copyXattr(srcPath, dstPath, "trusted.overlay.opaque")
}

func worker(i int, wg *sync.WaitGroup, s <-chan CopyJob) {
defer wg.Done()

for job := range s {
//Info("#"+strconv.Itoa(i), "copying", job.Src, "to", job.Dst)
// if dir, err := helpers.IsDirectory(job.Src); err == nil && dir {
// err = helpers.CopyDir(job.Src, job.Dst)
// if err != nil {
// Warning("Error copying dir", job, err)
// }
// continue
// }

_, err := os.Lstat(job.Dst)
if err != nil {
Debug("Copying ", job.Src)
if err := helpers.CopyFile(job.Src, job.Dst); err != nil {
if err := helpers.DeepCopyFile(job.Src, job.Dst); err != nil {
Warning("Error copying", job, err)
}
doCopyXattrs(job.Src, job.Dst)
}
}
}
@@ -38,6 +38,7 @@ type Compiler struct {
BuildValues []map[string]interface{}

PackageTargetOnly bool
Rebuild bool

BackendArgs []string

@@ -131,6 +132,13 @@ func KeepImg(b bool) func(cfg *Compiler) error {
}
}

func Rebuild(b bool) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.Rebuild = b
return nil
}
}

func PushImages(b bool) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.Push = b
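Usage sketch for the functional-option style used here, written as if inside this options package. Rebuild and KeepImg are the constructors defined in the file; the loop is just an assumed way of applying them.

    // Apply a set of compiler options to a fresh configuration.
    cfg := &Compiler{}
    for _, opt := range []func(cfg *Compiler) error{
        Rebuild(true),
        KeepImg(false),
    } {
        if err := opt(cfg); err != nil {
            // handle configuration error
        }
    }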
@@ -102,7 +102,7 @@ type LuetCompilationSpec struct {
Includes []string `json:"includes"`
Excludes []string `json:"excludes"`

BuildOptions options.Compiler `json:"build_options"`
BuildOptions *options.Compiler `json:"build_options"`
}

func NewLuetCompilationSpec(b []byte, p pkg.Package) (*LuetCompilationSpec, error) {

@@ -119,7 +119,7 @@ func (cs *LuetCompilationSpec) GetSourceAssertion() solver.PackagesAssertions {
}

func (cs *LuetCompilationSpec) SetBuildOptions(b options.Compiler) {
cs.BuildOptions = b
cs.BuildOptions = &b
}

func (cs *LuetCompilationSpec) SetSourceAssertion(as solver.PackagesAssertions) {
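Since BuildOptions is now a pointer, a nil value can mean "no build metadata was read", which is distinct from an explicit zero-valued options struct; callers are expected to guard for that, as FromPackage does in the compiler hunks above. A minimal sketch, where spec stands for a *LuetCompilationSpec:

    // Fall back to defaults when the spec carries no recorded build options.
    opts := options.Compiler{}
    if spec.BuildOptions != nil {
        opts = *spec.BuildOptions
    }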
@@ -90,8 +90,7 @@ func UntarProtect(src, dst string, sameOwner bool, protectedFiles []string, modi
}

if sameOwner {
// PRE: i have root privileged.

// we do have root permissions, so we can extract keeping the same permissions.
replacerArchive := archive.ReplaceFileTarWrapper(in, mods)

opts := &archive.TarOptions{
@@ -19,6 +19,7 @@ import (
"context"
"encoding/hex"
"os"
"strings"

"github.com/docker/cli/cli/trust"
"github.com/docker/distribution/reference"

@@ -122,3 +123,7 @@ func DownloadAndExtractDockerImage(temp, image, dest string, auth *types.AuthCon
err = c.Unpack(image, dest)
return listedImage, err
}

func StripInvalidStringsFromImage(s string) string {
return strings.ReplaceAll(s, "+", "-")
}
pkg/helpers/docker_test.go (new file, 30 lines)

@@ -0,0 +1,30 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@sabayon.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package helpers_test

import (
. "github.com/mudler/luet/pkg/helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

var _ = Describe("StripInvalidStringsFromImage", func() {
Context("Image names", func() {
It("strips invalid chars", func() {
Expect(StripInvalidStringsFromImage("foo+bar")).To(Equal("foo-bar"))
})
})
})
@@ -27,6 +27,7 @@ import (
"syscall"
"time"

"github.com/docker/docker/pkg/system"
"github.com/google/renameio"
copy "github.com/otiai10/copy"
"github.com/pkg/errors"

@@ -167,9 +168,27 @@ func Read(file string) (string, error) {
return string(dat), nil
}

func EnsureDirPerm(src, dst string) {
if info, err := os.Lstat(filepath.Dir(src)); err == nil {
if _, err := os.Lstat(filepath.Dir(dst)); os.IsNotExist(err) {
err := os.MkdirAll(filepath.Dir(dst), info.Mode().Perm())
if err != nil {
fmt.Println("warning: failed creating", filepath.Dir(dst), err.Error())
}
if stat, ok := info.Sys().(*syscall.Stat_t); ok {
if err := os.Lchown(filepath.Dir(dst), int(stat.Uid), int(stat.Gid)); err != nil {
fmt.Println("warning: failed chowning", filepath.Dir(dst), err.Error())
}
}
}
} else {
EnsureDir(dst)
}
}

func EnsureDir(fileName string) error {
dirName := filepath.Dir(fileName)
if _, serr := os.Stat(dirName); serr != nil {
if _, serr := os.Stat(dirName); os.IsNotExist(serr) {
merr := os.MkdirAll(dirName, os.ModePerm) // FIXME: It should preserve permissions from src to dst instead
if merr != nil {
return merr

@@ -178,12 +197,39 @@ func EnsureDir(fileName string) error {
return nil
}

// CopyFile copies the contents of the file named src to the file named
func CopyFile(src, dst string) (err error) {
return copy.Copy(src, dst, copy.Options{
Sync: true,
OnSymlink: func(string) copy.SymlinkAction { return copy.Shallow }})
}

func copyXattr(srcPath, dstPath, attr string) error {
data, err := system.Lgetxattr(srcPath, attr)
if err != nil {
return err
}
if data != nil {
if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
return err
}
}
return nil
}

func doCopyXattrs(srcPath, dstPath string) error {
if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
return err
}

return copyXattr(srcPath, dstPath, "trusted.overlay.opaque")
}

// DeepCopyFile copies the contents of the file named src to the file named
// by dst. The file will be created if it does not already exist. If the
// destination file exists, all it's contents will be replaced by the contents
// of the source file. The file mode will be copied from the source and
// the copied data is synced/flushed to stable storage.
func CopyFile(src, dst string) (err error) {
func DeepCopyFile(src, dst string) (err error) {
// Workaround for https://github.com/otiai10/copy/issues/47
fi, err := os.Lstat(src)
if err != nil {

@@ -193,7 +239,7 @@ func CopyFile(src, dst string) (err error) {
fm := fi.Mode()
switch {
case fm&os.ModeNamedPipe != 0:
EnsureDir(dst)
EnsureDirPerm(src, dst)
if err := syscall.Mkfifo(dst, uint32(fi.Mode())); err != nil {
return errors.Wrap(err, "failed creating pipe")
}

@@ -205,6 +251,9 @@ func CopyFile(src, dst string) (err error) {
return nil
}

//filepath.Dir(src)
EnsureDirPerm(src, dst)

err = copy.Copy(src, dst, copy.Options{
Sync: true,
OnSymlink: func(string) copy.SymlinkAction { return copy.Shallow }})

@@ -216,7 +265,8 @@ func CopyFile(src, dst string) (err error) {
fmt.Println("warning: failed chowning", dst, err.Error())
}
}
return err

return doCopyXattrs(src, dst)
}

func IsDirectory(path string) (bool, error) {
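To summarise the split above: CopyFile is now a thin, shallow copy, while DeepCopyFile additionally recreates the destination's parent directory with the source's permissions and ownership (EnsureDirPerm), handles named pipes, and copies the security.capability and trusted.overlay.opaque xattrs. Usage sketch with example paths:

    // Plain content copy; symlinks are copied shallowly.
    if err := helpers.CopyFile("/tmp/src.conf", "/tmp/dst.conf"); err != nil {
        fmt.Println("copy failed:", err)
    }

    // Full copy preserving directory permissions, ownership, FIFOs and xattrs,
    // as used by the artifact copy workers.
    if err := helpers.DeepCopyFile("/rootfs/bin/tool", "/target/bin/tool"); err != nil {
        fmt.Println("deep copy failed:", err)
    }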
@@ -17,9 +17,18 @@
package helpers

import (
"reflect"
"regexp"
)

func ReverseAny(s interface{}) {
n := reflect.ValueOf(s).Len()
swap := reflect.Swapper(s)
for i, j := 0, n-1; i < j; i, j = i+1, j-1 {
swap(i, j)
}
}

func MapMatchRegex(m *map[string]string, r *regexp.Regexp) bool {
ans := false
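ReverseAny relies on reflect.Swapper, so it reverses any slice in place regardless of element type. A minimal, runnable usage sketch:

    package main

    import (
        "fmt"

        "github.com/mudler/luet/pkg/helpers"
    )

    func main() {
        s := []string{"a", "b", "c"}
        helpers.ReverseAny(s)
        fmt.Println(s) // [c b a]
    }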
@@ -29,6 +29,7 @@ import (
"github.com/mudler/luet/pkg/compiler/types/artifact"
"github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/pkg/helpers"
"github.com/mudler/luet/pkg/helpers/imgworker"
. "github.com/mudler/luet/pkg/logger"
)

@@ -138,8 +139,8 @@ func (c *DockerClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.
func (c *DockerClient) DownloadFile(name string) (string, error) {
var file *os.File = nil
var err error
var temp string

var temp, contentstore string
var info *imgworker.ListedImage
// Files should be in URI/repository:<file>
ok := false

@@ -149,22 +150,21 @@ func (c *DockerClient) DownloadFile(name string) (string, error) {
}

for _, uri := range c.RepoData.Urls {

file, err = config.LuetCfg.GetSystem().TempFile("DockerClient")
if err != nil {
continue
}

contentstore, err := config.LuetCfg.GetSystem().TempDir("contentstore")
contentstore, err = config.LuetCfg.GetSystem().TempDir("contentstore")
if err != nil {
Warning("Cannot create contentstore", err.Error())
continue
}

imageName := fmt.Sprintf("%s:%s", uri, name)
imageName := fmt.Sprintf("%s:%s", uri, helpers.StripInvalidStringsFromImage(name))
Info("Downloading", imageName)

info, err := helpers.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
info, err = helpers.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
if err != nil {
Warning(fmt.Sprintf(errImageDownloadMsg, imageName, err.Error()))
continue

@@ -175,7 +175,6 @@ func (c *DockerClient) DownloadFile(name string) (string, error) {

Debug("\nCopying file ", filepath.Join(temp, name), "to", file.Name())
err = helpers.CopyFile(filepath.Join(temp, name), file.Name())

if err != nil {
continue
}
@@ -128,6 +128,8 @@ func packsToList(p pkg.Packages) string {
|
||||
for _, pp := range p {
|
||||
packs = append(packs, pp.HumanReadableString())
|
||||
}
|
||||
|
||||
sort.Strings(packs)
|
||||
return strings.Join(packs, " ")
|
||||
}
|
||||
|
||||
@@ -137,6 +139,7 @@ func matchesToList(artefacts map[string]ArtifactMatch) string {
|
||||
for fingerprint, match := range artefacts {
|
||||
packs = append(packs, fmt.Sprintf("%s (%s)", fingerprint, match.Repository.GetName()))
|
||||
}
|
||||
sort.Strings(packs)
|
||||
return strings.Join(packs, " ")
|
||||
}
|
||||
|
||||
@@ -153,39 +156,7 @@ func (l *LuetInstaller) Upgrade(s *System) error {
|
||||
Info(":memo: note: will consider new build revisions while upgrading")
|
||||
}
|
||||
|
||||
Spinner(32)
|
||||
uninstall, toInstall, err := l.computeUpgrade(syncedRepos, s)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed computing upgrade")
|
||||
}
|
||||
SpinnerStop()
|
||||
|
||||
if len(uninstall) > 0 {
|
||||
Info(":recycle: Packages that are going to be removed from the system:\n ", Yellow(packsToList(uninstall)).BgBlack().String())
|
||||
}
|
||||
|
||||
if len(toInstall) > 0 {
|
||||
Info(":zap:Packages that are going to be installed in the system:\n ", Green(packsToList(toInstall)).BgBlack().String())
|
||||
}
|
||||
|
||||
if len(toInstall) == 0 && len(uninstall) == 0 {
|
||||
Info("Nothing to do")
|
||||
return nil
|
||||
}
|
||||
|
||||
if l.Options.Ask {
|
||||
Info("By going forward, you are also accepting the licenses of the packages that you are going to install in your system.")
|
||||
if Ask() {
|
||||
l.Options.Ask = false // Don't prompt anymore
|
||||
return l.swap(syncedRepos, uninstall, toInstall, s, true)
|
||||
} else {
|
||||
return errors.New("Aborted by user")
|
||||
}
|
||||
}
|
||||
|
||||
Spinner(32)
|
||||
defer SpinnerStop()
|
||||
return l.swap(syncedRepos, uninstall, toInstall, s, true)
|
||||
return l.checkAndUpgrade(syncedRepos, s)
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) SyncRepositories(inMemory bool) (Repositories, error) {
|
||||
@@ -226,11 +197,19 @@ func (l *LuetInstaller) Swap(toRemove pkg.Packages, toInstall pkg.Packages, s *S
|
||||
toRemoveFinal = append(toRemoveFinal, pp)
|
||||
}
|
||||
}
|
||||
o := Option{
|
||||
FullUninstall: false,
|
||||
Force: true,
|
||||
CheckConflicts: false,
|
||||
FullCleanUninstall: false,
|
||||
NoDeps: l.Options.NoDeps,
|
||||
OnlyDeps: false,
|
||||
}
|
||||
|
||||
return l.swap(syncedRepos, toRemoveFinal, toInstall, s, false)
|
||||
return l.swap(o, syncedRepos, toRemoveFinal, toInstall, s)
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) computeSwap(syncedRepos Repositories, toRemove pkg.Packages, toInstall pkg.Packages, s *System) (map[string]ArtifactMatch, pkg.Packages, solver.PackagesAssertions, pkg.PackageDatabase, error) {
|
||||
func (l *LuetInstaller) computeSwap(o Option, syncedRepos Repositories, toRemove pkg.Packages, toInstall pkg.Packages, s *System) (map[string]ArtifactMatch, pkg.Packages, solver.PackagesAssertions, pkg.PackageDatabase, error) {
|
||||
|
||||
allRepos := pkg.NewInMemoryDatabase(false)
|
||||
syncedRepos.SyncDatabase(allRepos)
|
||||
@@ -245,8 +224,8 @@ func (l *LuetInstaller) computeSwap(syncedRepos Repositories, toRemove pkg.Packa
|
||||
|
||||
systemAfterChanges := &System{Database: installedtmp}
|
||||
|
||||
packs, err := l.computeUninstall(systemAfterChanges, toRemove...)
|
||||
if err != nil && !l.Options.Force {
|
||||
packs, err := l.computeUninstall(o, systemAfterChanges, toRemove...)
|
||||
if err != nil && !o.Force {
|
||||
Error("Failed computing uninstall for ", packsToList(toRemove))
|
||||
return nil, nil, nil, nil, errors.Wrap(err, "computing uninstall "+packsToList(toRemove))
|
||||
}
|
||||
@@ -257,30 +236,16 @@ func (l *LuetInstaller) computeSwap(syncedRepos Repositories, toRemove pkg.Packa
|
||||
}
|
||||
}
|
||||
|
||||
match, packages, assertions, allRepos, err := l.computeInstall(syncedRepos, toInstall, systemAfterChanges)
|
||||
match, packages, assertions, allRepos, err := l.computeInstall(o, syncedRepos, toInstall, systemAfterChanges)
|
||||
for _, p := range toInstall {
|
||||
assertions = append(assertions, solver.PackageAssert{Package: p.(*pkg.DefaultPackage), Value: true})
|
||||
}
|
||||
return match, packages, assertions, allRepos, err
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) swap(syncedRepos Repositories, toRemove pkg.Packages, toInstall pkg.Packages, s *System, forceNodeps bool) error {
|
||||
forced := l.Options.Force
|
||||
nodeps := l.Options.NoDeps
|
||||
func (l *LuetInstaller) swap(o Option, syncedRepos Repositories, toRemove pkg.Packages, toInstall pkg.Packages, s *System) error {
|
||||
|
||||
// We don't want any conflict with the installed packages to arise during the upgrade.
// In this way we both force uninstalls and avoid checking for conflicts
// against the current system state, which is pending deletion.
// E.g. you can't check for conflicts for an upgrade to a new version of A
// if the old A is still installed in the system. This is due to the fact that
// the solver now enforces the constraints and explicitly denies two packages
// of the same version being installed.
|
||||
l.Options.Force = true
|
||||
if forceNodeps {
|
||||
l.Options.NoDeps = true
|
||||
}
|
||||
|
||||
match, packages, assertions, allRepos, err := l.computeSwap(syncedRepos, toRemove, toInstall, s)
|
||||
match, packages, assertions, allRepos, err := l.computeSwap(o, syncedRepos, toRemove, toInstall, s)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed computing package replacement")
|
||||
}
|
||||
@@ -310,15 +275,223 @@ func (l *LuetInstaller) swap(syncedRepos Repositories, toRemove pkg.Packages, to
|
||||
return nil
|
||||
}
|
||||
|
||||
err = l.Uninstall(s, toRemove...)
|
||||
if err != nil && !l.Options.Force {
|
||||
Error("Failed uninstall for ", packsToList(toRemove))
|
||||
return errors.Wrap(err, "uninstalling "+packsToList(toRemove))
|
||||
ops := l.getOpsWithOptions(toRemove, match, Option{
|
||||
Force: o.Force,
|
||||
NoDeps: false,
|
||||
OnlyDeps: o.OnlyDeps,
|
||||
RunFinalizers: false,
|
||||
}, o, syncedRepos, packages, assertions, allRepos)
|
||||
|
||||
err = l.runOps(ops, s)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed running installer options")
|
||||
}
|
||||
|
||||
l.Options.Force = forced
|
||||
l.Options.NoDeps = nodeps
|
||||
return l.install(syncedRepos, match, packages, assertions, allRepos, s)
|
||||
toFinalize, err := l.getFinalizers(allRepos, assertions, match, o.NoDeps)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed getting package to finalize")
|
||||
}
|
||||
|
||||
return s.ExecuteFinalizers(toFinalize)
|
||||
}
|
||||
|
||||
type Option struct {
|
||||
Force bool
|
||||
NoDeps bool
|
||||
CheckConflicts bool
|
||||
FullUninstall bool
|
||||
FullCleanUninstall bool
|
||||
OnlyDeps bool
|
||||
RunFinalizers bool
|
||||
}
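
The new Option struct replaces the booleans previously read from l.Options at each call site. Below is a minimal sketch of the two combinations used later in this diff (the forced upgrade path and the plain install path); the struct is redefined locally only so the snippet compiles on its own.

package main

import "fmt"

// Option is redefined here only for the sketch; it mirrors the struct above.
type Option struct {
	Force              bool
	NoDeps             bool
	CheckConflicts     bool
	FullUninstall      bool
	FullCleanUninstall bool
	OnlyDeps           bool
	RunFinalizers      bool
}

func main() {
	// checkAndUpgrade forces uninstalls and skips dependency/conflict checks,
	// while Install keeps the user-facing options and runs finalizers.
	upgrade := Option{Force: true, NoDeps: true}
	install := Option{RunFinalizers: true}
	fmt.Printf("upgrade: %+v\ninstall: %+v\n", upgrade, install)
}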
|
||||
|
||||
type operation struct {
|
||||
Option Option
|
||||
Package pkg.Package
|
||||
}
|
||||
|
||||
type installOperation struct {
|
||||
operation
|
||||
Reposiories Repositories
|
||||
Packages pkg.Packages
|
||||
Assertions solver.PackagesAssertions
|
||||
Database pkg.PackageDatabase
|
||||
Matches map[string]ArtifactMatch
|
||||
}
|
||||
|
||||
// installerOp is the operation that is sent to the
|
||||
// upgradeWorker's channel (todo)
|
||||
type installerOp struct {
|
||||
Uninstall operation
|
||||
Install installOperation
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) runOps(ops []installerOp, s *System) error {
|
||||
all := make(chan installerOp)
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
|
||||
// Do the real install
|
||||
for i := 0; i < l.Options.Concurrency; i++ {
|
||||
wg.Add(1)
|
||||
go l.installerOpWorker(i, wg, all, s)
|
||||
}
|
||||
|
||||
for _, c := range ops {
|
||||
all <- c
|
||||
}
|
||||
close(all)
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: use installerOpWorker in place of all the other workers.
|
||||
// This one is general enough to read a list of operations and execute them.
|
||||
func (l *LuetInstaller) installerOpWorker(i int, wg *sync.WaitGroup, c <-chan installerOp, s *System) error {
|
||||
defer wg.Done()
|
||||
|
||||
for p := range c {
|
||||
if p.Uninstall.Package != nil {
|
||||
Debug("Replacing package inplace")
|
||||
toUninstall, uninstall, err := l.generateUninstallFn(p.Uninstall.Option, s, p.Uninstall.Package)
|
||||
if err != nil {
|
||||
Error("Failed to generate Uninstall function for" + err.Error())
|
||||
continue
|
||||
//return errors.Wrap(err, "while computing uninstall")
|
||||
}
|
||||
|
||||
err = uninstall()
|
||||
if err != nil {
|
||||
Error("Failed uninstall for ", packsToList(toUninstall))
|
||||
continue
|
||||
//return errors.Wrap(err, "uninstalling "+packsToList(toUninstall))
|
||||
}
|
||||
}
|
||||
if p.Install.Package != nil {
|
||||
artMatch := p.Install.Matches[p.Install.Package.GetFingerPrint()]
|
||||
ass := p.Install.Assertions.Search(p.Install.Package.GetFingerPrint())
|
||||
packageToInstall, _ := p.Install.Packages.Find(p.Install.Package.GetPackageName())
|
||||
|
||||
err := l.install(
|
||||
p.Install.Option,
|
||||
p.Install.Reposiories,
|
||||
map[string]ArtifactMatch{p.Install.Package.GetFingerPrint(): artMatch},
|
||||
pkg.Packages{packageToInstall},
|
||||
solver.PackagesAssertions{*ass},
|
||||
p.Install.Database,
|
||||
s,
|
||||
)
|
||||
if err != nil {
|
||||
Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checks whether we can uninstall and install in place, and composes the installer worker ops
|
||||
func (l *LuetInstaller) getOpsWithOptions(
|
||||
toUninstall pkg.Packages, installMatch map[string]ArtifactMatch, installOpt, uninstallOpt Option,
|
||||
syncedRepos Repositories, toInstall pkg.Packages, solution solver.PackagesAssertions, allRepos pkg.PackageDatabase) []installerOp {
|
||||
resOps := []installerOp{}
|
||||
for _, match := range installMatch {
|
||||
if pack, err := toUninstall.Find(match.Package.GetPackageName()); err == nil {
|
||||
resOps = append(resOps, installerOp{
|
||||
Uninstall: operation{Package: pack, Option: uninstallOpt},
|
||||
Install: installOperation{
|
||||
operation: operation{
|
||||
Package: match.Package,
|
||||
Option: installOpt,
|
||||
},
|
||||
Matches: installMatch,
|
||||
Packages: toInstall,
|
||||
Reposiories: syncedRepos,
|
||||
Assertions: solution,
|
||||
Database: allRepos,
|
||||
},
|
||||
})
|
||||
} else {
|
||||
resOps = append(resOps, installerOp{
|
||||
Install: installOperation{
|
||||
operation: operation{Package: match.Package, Option: installOpt},
|
||||
Matches: installMatch,
|
||||
Reposiories: syncedRepos,
|
||||
Packages: toInstall,
|
||||
Assertions: solution,
|
||||
Database: allRepos,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for _, p := range toUninstall {
|
||||
found := false
|
||||
|
||||
for _, match := range installMatch {
|
||||
if match.Package.GetPackageName() == p.GetPackageName() {
|
||||
found = true
|
||||
}
|
||||
|
||||
}
|
||||
if !found {
|
||||
resOps = append(resOps, installerOp{
|
||||
Uninstall: operation{Package: p, Option: uninstallOpt},
|
||||
})
|
||||
}
|
||||
}
|
||||
return resOps
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) checkAndUpgrade(r Repositories, s *System) error {
|
||||
Spinner(32)
|
||||
uninstall, toInstall, err := l.computeUpgrade(r, s)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed computing upgrade")
|
||||
}
|
||||
SpinnerStop()
|
||||
|
||||
if len(uninstall) > 0 {
|
||||
Info(":recycle: Packages that are going to be removed from the system:\n ", Yellow(packsToList(uninstall)).BgBlack().String())
|
||||
}
|
||||
|
||||
if len(toInstall) > 0 {
|
||||
Info(":zap:Packages that are going to be installed in the system:\n ", Green(packsToList(toInstall)).BgBlack().String())
|
||||
}
|
||||
|
||||
if len(toInstall) == 0 && len(uninstall) == 0 {
|
||||
Info("Nothing to do")
|
||||
return nil
|
||||
}
|
||||
|
||||
// We don't want any conflict with the installed packages to arise during the upgrade.
// In this way we both force uninstalls and avoid checking for conflicts
// against the current system state, which is pending deletion.
// E.g. you can't check for conflicts for an upgrade to a new version of A
// if the old A is still installed in the system. This is due to the fact that
// the solver now enforces the constraints and explicitly denies two packages
// of the same version being installed.
|
||||
o := Option{
|
||||
FullUninstall: false,
|
||||
Force: true,
|
||||
CheckConflicts: false,
|
||||
FullCleanUninstall: false,
|
||||
NoDeps: true,
|
||||
OnlyDeps: false,
|
||||
}
|
||||
|
||||
if l.Options.Ask {
|
||||
Info("By going forward, you are also accepting the licenses of the packages that you are going to install in your system.")
|
||||
if Ask() {
|
||||
l.Options.Ask = false // Don't prompt anymore
|
||||
return l.swap(o, r, uninstall, toInstall, s)
|
||||
} else {
|
||||
return errors.New("Aborted by user")
|
||||
}
|
||||
}
|
||||
|
||||
return l.swap(o, r, uninstall, toInstall, s)
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {
|
||||
@@ -327,7 +500,20 @@ func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {
|
||||
return err
|
||||
}
|
||||
|
||||
match, packages, assertions, allRepos, err := l.computeInstall(syncedRepos, cp, s)
|
||||
if len(s.Database.World()) > 0 {
|
||||
Info(":thinking: Checking for available upgrades")
|
||||
if err := l.checkAndUpgrade(syncedRepos, s); err != nil {
|
||||
return errors.Wrap(err, "while checking upgrades before install")
|
||||
}
|
||||
}
|
||||
|
||||
o := Option{
|
||||
NoDeps: l.Options.NoDeps,
|
||||
Force: l.Options.Force,
|
||||
OnlyDeps: l.Options.OnlyDeps,
|
||||
RunFinalizers: true,
|
||||
}
|
||||
match, packages, assertions, allRepos, err := l.computeInstall(o, syncedRepos, cp, s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -364,12 +550,12 @@ func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {
|
||||
Info("By going forward, you are also accepting the licenses of the packages that you are going to install in your system.")
|
||||
if Ask() {
|
||||
l.Options.Ask = false // Don't prompt anymore
|
||||
return l.install(syncedRepos, match, packages, assertions, allRepos, s)
|
||||
return l.install(o, syncedRepos, match, packages, assertions, allRepos, s)
|
||||
} else {
|
||||
return errors.New("Aborted by user")
|
||||
}
|
||||
}
|
||||
return l.install(syncedRepos, match, packages, assertions, allRepos, s)
|
||||
return l.install(o, syncedRepos, match, packages, assertions, allRepos, s)
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) download(syncedRepos Repositories, toDownload map[string]ArtifactMatch) error {
|
||||
@@ -444,7 +630,7 @@ func (l *LuetInstaller) Reclaim(s *System) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) computeInstall(syncedRepos Repositories, cp pkg.Packages, s *System) (map[string]ArtifactMatch, pkg.Packages, solver.PackagesAssertions, pkg.PackageDatabase, error) {
|
||||
func (l *LuetInstaller) computeInstall(o Option, syncedRepos Repositories, cp pkg.Packages, s *System) (map[string]ArtifactMatch, pkg.Packages, solver.PackagesAssertions, pkg.PackageDatabase, error) {
|
||||
var p pkg.Packages
|
||||
toInstall := map[string]ArtifactMatch{}
|
||||
allRepos := pkg.NewInMemoryDatabase(false)
|
||||
@@ -477,11 +663,11 @@ func (l *LuetInstaller) computeInstall(syncedRepos Repositories, cp pkg.Packages
|
||||
var packagesToInstall pkg.Packages
|
||||
var err error
|
||||
|
||||
if !l.Options.NoDeps {
|
||||
if !o.NoDeps {
|
||||
solv := solver.NewResolver(solver.Options{Type: l.Options.SolverOptions.Implementation, Concurrency: l.Options.Concurrency}, s.Database, allRepos, pkg.NewInMemoryDatabase(false), l.Options.SolverOptions.Resolver())
|
||||
solution, err = solv.Install(p)
|
||||
/// TODO: PackageAssertions needs to be a map[fingerprint]pack so lookup is in O(1)
|
||||
if err != nil && !l.Options.Force {
|
||||
if err != nil && !o.Force {
|
||||
return toInstall, p, solution, allRepos, errors.Wrap(err, "Failed solving solution for package")
|
||||
}
|
||||
// Gathers things to install
|
||||
@@ -494,7 +680,7 @@ func (l *LuetInstaller) computeInstall(syncedRepos Repositories, cp pkg.Packages
|
||||
packagesToInstall = append(packagesToInstall, assertion.Package)
|
||||
}
|
||||
}
|
||||
} else if !l.Options.OnlyDeps {
|
||||
} else if !o.OnlyDeps {
|
||||
for _, currentPack := range p {
|
||||
if _, err := s.Database.FindPackage(currentPack); err == nil {
|
||||
// skip matching if it is installed already
|
||||
@@ -529,7 +715,47 @@ func (l *LuetInstaller) computeInstall(syncedRepos Repositories, cp pkg.Packages
|
||||
return toInstall, p, solution, allRepos, nil
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) install(syncedRepos Repositories, toInstall map[string]ArtifactMatch, p pkg.Packages, solution solver.PackagesAssertions, allRepos pkg.PackageDatabase, s *System) error {
|
||||
func (l *LuetInstaller) getFinalizers(allRepos pkg.PackageDatabase, solution solver.PackagesAssertions, toInstall map[string]ArtifactMatch, nodeps bool) ([]pkg.Package, error) {
|
||||
var toFinalize []pkg.Package
|
||||
if !nodeps {
|
||||
// TODO: Lower those errors as warning
|
||||
for _, w := range toInstall {
|
||||
// Finalizers need to run in order and in sequence.
|
||||
ordered, err := solution.Order(allRepos, w.Package.GetFingerPrint())
|
||||
if err != nil {
|
||||
return toFinalize, errors.Wrap(err, "While order a solution for "+w.Package.HumanReadableString())
|
||||
}
|
||||
ORDER:
|
||||
for _, ass := range ordered {
|
||||
if ass.Value {
|
||||
installed, ok := toInstall[ass.Package.GetFingerPrint()]
|
||||
if !ok {
|
||||
// It was a dep already installed in the system, so we can skip it safely
|
||||
continue ORDER
|
||||
}
|
||||
treePackage, err := installed.Repository.GetTree().GetDatabase().FindPackage(ass.Package)
|
||||
if err != nil {
|
||||
return toFinalize, errors.Wrap(err, "Error getting package "+ass.Package.HumanReadableString())
|
||||
}
|
||||
|
||||
toFinalize = append(toFinalize, treePackage)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
} else {
|
||||
for _, c := range toInstall {
|
||||
treePackage, err := c.Repository.GetTree().GetDatabase().FindPackage(c.Package)
|
||||
if err != nil {
|
||||
return toFinalize, errors.Wrap(err, "Error getting package "+c.Package.HumanReadableString())
|
||||
}
|
||||
toFinalize = append(toFinalize, treePackage)
|
||||
}
|
||||
}
|
||||
return toFinalize, nil
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) install(o Option, syncedRepos Repositories, toInstall map[string]ArtifactMatch, p pkg.Packages, solution solver.PackagesAssertions, allRepos pkg.PackageDatabase, s *System) error {
|
||||
// Install packages into rootfs in parallel.
|
||||
if err := l.download(syncedRepos, toInstall); err != nil {
|
||||
return errors.Wrap(err, "Downloading packages")
|
||||
@@ -558,46 +784,19 @@ func (l *LuetInstaller) install(syncedRepos Repositories, toInstall map[string]A
|
||||
for _, c := range toInstall {
|
||||
// Annotate to the system that the package was installed
|
||||
_, err := s.Database.CreatePackage(c.Package)
|
||||
if err != nil && !l.Options.Force {
|
||||
if err != nil && !o.Force {
|
||||
return errors.Wrap(err, "Failed creating package")
|
||||
}
|
||||
bus.Manager.Publish(bus.EventPackageInstall, c)
|
||||
}
|
||||
var toFinalize []pkg.Package
|
||||
if !l.Options.NoDeps {
|
||||
// TODO: Lower those errors as warning
|
||||
for _, w := range p {
|
||||
// Finalizers need to run in order and in sequence.
|
||||
ordered, err := solution.Order(allRepos, w.GetFingerPrint())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "While order a solution for "+w.HumanReadableString())
|
||||
}
|
||||
ORDER:
|
||||
for _, ass := range ordered {
|
||||
if ass.Value {
|
||||
installed, ok := toInstall[ass.Package.GetFingerPrint()]
|
||||
if !ok {
|
||||
// It was a dep already installed in the system, so we can skip it safely
|
||||
continue ORDER
|
||||
}
|
||||
treePackage, err := installed.Repository.GetTree().GetDatabase().FindPackage(ass.Package)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error getting package "+ass.Package.HumanReadableString())
|
||||
}
|
||||
|
||||
toFinalize = append(toFinalize, treePackage)
|
||||
}
|
||||
}
|
||||
if !o.RunFinalizers {
|
||||
return nil
|
||||
}
|
||||
|
||||
}
|
||||
} else {
|
||||
for _, c := range toInstall {
|
||||
treePackage, err := c.Repository.GetTree().GetDatabase().FindPackage(c.Package)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error getting package "+c.Package.HumanReadableString())
|
||||
}
|
||||
toFinalize = append(toFinalize, treePackage)
|
||||
}
|
||||
toFinalize, err := l.getFinalizers(allRepos, solution, toInstall, o.NoDeps)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed getting package to finalize")
|
||||
}
|
||||
|
||||
return s.ExecuteFinalizers(toFinalize)
|
||||
@@ -677,6 +876,51 @@ func (l *LuetInstaller) installerWorker(i int, wg *sync.WaitGroup, c <-chan Arti
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkAndPrunePath(path string) {
|
||||
// check if now the target path is empty
|
||||
targetPath := filepath.Dir(path)
|
||||
|
||||
fi, err := os.Lstat(targetPath)
|
||||
if err != nil {
|
||||
// Warning("Dir not found (it was before?) ", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
switch mode := fi.Mode(); {
|
||||
case mode.IsDir():
|
||||
files, err := ioutil.ReadDir(targetPath)
|
||||
if err != nil {
|
||||
Warning("Failed reading folder", targetPath, err.Error())
|
||||
}
|
||||
if len(files) != 0 {
|
||||
Debug("Preserving not-empty folder", targetPath)
|
||||
return
|
||||
}
|
||||
}
|
||||
if err = os.Remove(targetPath); err != nil {
|
||||
Warning("Failed removing file (maybe not present in the system target anymore ?)", targetPath, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// We try to clean up every path component of the removed file, pruning the folders left behind if they are empty
|
||||
func pruneEmptyFilePath(path string) {
|
||||
checkAndPrunePath(path)
|
||||
|
||||
// A path is e.g. /usr/bin/bar
// we want to build the array "/usr", "/usr/bin", "/usr/bin/bar"
|
||||
paths := strings.Split(path, string(os.PathSeparator))
|
||||
currentPath := filepath.Join(string(os.PathSeparator), paths[0])
|
||||
allPaths := []string{currentPath}
|
||||
for _, p := range paths[1:] {
|
||||
currentPath = filepath.Join(currentPath, p)
|
||||
allPaths = append(allPaths, currentPath)
|
||||
}
|
||||
helpers.ReverseAny(allPaths)
|
||||
for _, p := range allPaths {
|
||||
checkAndPrunePath(p)
|
||||
}
|
||||
}
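
A self-contained sketch of the path expansion performed by pruneEmptyFilePath above: the removed file's path is split into its parent components and visited deepest-first, so empty directories can be pruned bottom-up. The example path is hypothetical.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	// For a removed file /usr/bin/bar the candidate list becomes
	// "/", "/usr", "/usr/bin", "/usr/bin/bar"; reversing it means the
	// deepest directories are checked (and possibly removed) first.
	path := "/usr/bin/bar"
	parts := strings.Split(path, string(os.PathSeparator))
	current := filepath.Join(string(os.PathSeparator), parts[0])
	all := []string{current}
	for _, p := range parts[1:] {
		current = filepath.Join(current, p)
		all = append(all, current)
	}
	for i := len(all) - 1; i >= 0; i-- {
		fmt.Println(all[i])
	}
}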
|
||||
|
||||
func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
|
||||
var cp *config.ConfigProtect
|
||||
annotationDir := ""
|
||||
@@ -738,6 +982,8 @@ func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
|
||||
if err = os.Remove(target); err != nil {
|
||||
Warning("Failed removing file (maybe not present in the system target anymore ?)", target, err.Error())
|
||||
}
|
||||
|
||||
pruneEmptyFilePath(target)
|
||||
}
|
||||
|
||||
for _, f := range notPresent {
|
||||
@@ -751,6 +997,8 @@ func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
|
||||
if err = os.Remove(target); err != nil {
|
||||
Debug("Failed removing file (not present in the system target)", target, err.Error())
|
||||
}
|
||||
|
||||
pruneEmptyFilePath(target)
|
||||
}
|
||||
|
||||
err = s.Database.RemovePackageFiles(p)
|
||||
@@ -768,17 +1016,17 @@ func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) computeUninstall(s *System, packs ...pkg.Package) (pkg.Packages, error) {
|
||||
func (l *LuetInstaller) computeUninstall(o Option, s *System, packs ...pkg.Package) (pkg.Packages, error) {
|
||||
|
||||
var toUninstall pkg.Packages
|
||||
// compute uninstall from all world - remove packages in parallel - run uninstall finalizer (in order) TODO - mark the uninstallation in db
|
||||
// Get installed definition
|
||||
checkConflicts := l.Options.CheckConflicts
|
||||
full := l.Options.FullUninstall
|
||||
if l.Options.Force == true { // IF forced, we want to remove the package and all its requires
|
||||
checkConflicts = false
|
||||
full = false
|
||||
}
|
||||
checkConflicts := o.CheckConflicts
|
||||
full := o.FullUninstall
|
||||
// if o.Force == true { // IF forced, we want to remove the package and all its requires
|
||||
// checkConflicts = false
|
||||
// full = false
|
||||
// }
|
||||
|
||||
// Create a temporary DB with the installed packages
|
||||
// so the solver is much faster finding the deptree
|
||||
@@ -788,11 +1036,11 @@ func (l *LuetInstaller) computeUninstall(s *System, packs ...pkg.Package) (pkg.P
|
||||
return toUninstall, errors.Wrap(err, "Failed create temporary in-memory db")
|
||||
}
|
||||
|
||||
if !l.Options.NoDeps {
|
||||
if !o.NoDeps {
|
||||
solv := solver.NewResolver(solver.Options{Type: l.Options.SolverOptions.Implementation, Concurrency: l.Options.Concurrency}, installedtmp, installedtmp, pkg.NewInMemoryDatabase(false), l.Options.SolverOptions.Resolver())
|
||||
var solution pkg.Packages
|
||||
var err error
|
||||
if l.Options.FullCleanUninstall {
|
||||
if o.FullCleanUninstall {
|
||||
solution, err = solv.UninstallUniverse(packs)
|
||||
if err != nil {
|
||||
return toUninstall, errors.Wrap(err, "Could not solve the uninstall constraints. Tip: try with --solver-type qlearning or with --force, or by removing packages excluding their dependencies with --nodeps")
|
||||
@@ -813,32 +1061,47 @@ func (l *LuetInstaller) computeUninstall(s *System, packs ...pkg.Package) (pkg.P
|
||||
|
||||
return toUninstall, nil
|
||||
}
|
||||
func (l *LuetInstaller) Uninstall(s *System, packs ...pkg.Package) error {
|
||||
|
||||
func (l *LuetInstaller) generateUninstallFn(o Option, s *System, packs ...pkg.Package) (pkg.Packages, func() error, error) {
|
||||
for _, p := range packs {
|
||||
if packs, _ := s.Database.FindPackages(p); len(packs) == 0 {
|
||||
return errors.New("Package not found in the system")
|
||||
return nil, nil, errors.New("Package not found in the system")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
Spinner(32)
|
||||
toUninstall, err := l.computeUninstall(s, packs...)
|
||||
toUninstall, err := l.computeUninstall(o, s, packs...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "while computing uninstall")
|
||||
return nil, nil, errors.Wrap(err, "while computing uninstall")
|
||||
}
|
||||
SpinnerStop()
|
||||
|
||||
uninstall := func() error {
|
||||
for _, p := range toUninstall {
|
||||
err := l.uninstall(p, s)
|
||||
if err != nil && !l.Options.Force {
|
||||
if err != nil && !o.Force {
|
||||
return errors.Wrap(err, "Uninstall failed")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return toUninstall, uninstall, nil
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) Uninstall(s *System, packs ...pkg.Package) error {
|
||||
|
||||
Spinner(32)
|
||||
o := Option{
|
||||
FullUninstall: l.Options.FullUninstall,
|
||||
Force: l.Options.Force,
|
||||
CheckConflicts: l.Options.CheckConflicts,
|
||||
FullCleanUninstall: l.Options.FullCleanUninstall,
|
||||
}
|
||||
toUninstall, uninstall, err := l.generateUninstallFn(o, s, packs...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "while computing uninstall")
|
||||
}
|
||||
SpinnerStop()
|
||||
|
||||
if len(toUninstall) == 0 {
|
||||
Info("Nothing to do")
|
||||
return nil
|
||||
|
@@ -166,7 +166,7 @@ func (d *dockerRepositoryGenerator) pushImageFromArtifact(a *artifact.PackageArt
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed generating checksums for tree")
|
||||
}
|
||||
imageTree := fmt.Sprintf("%s:%s", d.imagePrefix, a.GetFileName())
|
||||
imageTree := fmt.Sprintf("%s:%s", d.imagePrefix, helpers.StripInvalidStringsFromImage(a.GetFileName()))
|
||||
|
||||
return d.pushFileFromArtifact(treeArchive, imageTree)
|
||||
}
|
||||
|
@@ -309,7 +309,7 @@ func (p *DefaultPackage) GetPackageName() string {
|
||||
}
|
||||
|
||||
func (p *DefaultPackage) ImageID() string {
|
||||
return strings.ReplaceAll(p.GetFingerPrint(), "+", "-")
|
||||
return helpers.StripInvalidStringsFromImage(p.GetFingerPrint())
|
||||
}
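
helpers.StripInvalidStringsFromImage is not shown in this diff; judging from the line it replaces (a plain "+" to "-" substitution) and from the integration test below that expects the tag z-test-1.0-2 for version 1.0+2, sanitizing the "+" is assumed to be at least part of what it does. A stand-in sketch:

package main

import (
	"fmt"
	"strings"
)

// stripInvalidStringsFromImage is a hypothetical stand-in for the helper
// used above; the real implementation may sanitize more characters.
func stripInvalidStringsFromImage(s string) string {
	return strings.ReplaceAll(s, "+", "-")
}

func main() {
	// Docker tags do not allow "+", so the fingerprint of test/z-1.0+2
	// becomes a valid tag.
	fmt.Println(stripInvalidStringsFromImage("z-test-1.0+2")) // z-test-1.0-2
}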
|
||||
|
||||
// GetBuildTimestamp returns the package build timestamp
|
||||
@@ -645,6 +645,16 @@ func (set Packages) Best(v version.Versioner) Package {
|
||||
return versionsMap[sorted[len(sorted)-1]]
|
||||
}
|
||||
|
||||
func (set Packages) Find(packageName string) (Package, error) {
|
||||
for _, p := range set {
|
||||
if p.GetPackageName() == packageName {
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
|
||||
return &DefaultPackage{}, errors.New("package not found")
|
||||
}
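
A toy sketch of the lookup semantics of the new Packages.Find above: match on the package name, return an error (and an empty package) when nothing matches. The stub types here are illustrative only, not luet's real pkg.Package interface.

package main

import (
	"errors"
	"fmt"
)

// pkgStub stands in for pkg.Package; only the name lookup matters here.
type pkgStub struct{ category, name string }

func (p pkgStub) PackageName() string { return p.category + "/" + p.name }

type packages []pkgStub

// Find mirrors the behavior introduced above.
func (set packages) Find(name string) (pkgStub, error) {
	for _, p := range set {
		if p.PackageName() == name {
			return p, nil
		}
	}
	return pkgStub{}, errors.New("package not found")
}

func main() {
	set := packages{{"test", "b"}, {"test", "c"}}
	if p, err := set.Find("test/c"); err == nil {
		fmt.Println("found", p.PackageName())
	}
}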
|
||||
|
||||
func (set Packages) Unique() Packages {
|
||||
var result Packages
|
||||
uniq := make(map[string]Package)
|
||||
|
11  tests/fixtures/docker_repo/c/build.yaml (vendored, new file)
@@ -0,0 +1,11 @@
|
||||
prelude:
|
||||
- echo foo > /test
|
||||
- echo bar > /test2
|
||||
steps:
|
||||
- echo c > /c
|
||||
- echo c > /cd
|
||||
requires:
|
||||
- category: "test"
|
||||
name: "b"
|
||||
version: "1.0"
|
||||
|
3  tests/fixtures/docker_repo/c/definition.yaml (vendored, new file)
@@ -0,0 +1,3 @@
|
||||
category: "test"
|
||||
name: "c"
|
||||
version: "1.0"
|
9  tests/fixtures/docker_repo/cat/b/build.yaml (vendored, new file)
@@ -0,0 +1,9 @@
|
||||
image: "alpine"
|
||||
prelude:
|
||||
- echo foo > /test
|
||||
- echo bar > /test2
|
||||
steps:
|
||||
- echo artifact5 > /test5
|
||||
- echo artifact6 > /test6
|
||||
- chmod +x generate.sh
|
||||
- ./generate.sh
|
3  tests/fixtures/docker_repo/cat/b/definition.yaml (vendored, new file)
@@ -0,0 +1,3 @@
|
||||
category: "test"
|
||||
name: "b"
|
||||
version: "1.0"
|
1  tests/fixtures/docker_repo/cat/b/generate.sh (vendored, new file)
@@ -0,0 +1 @@
|
||||
echo generated > /artifact42
|
10  tests/fixtures/docker_repo/cat/cat2/a/build.yaml (vendored, new file)
@@ -0,0 +1,10 @@
|
||||
prelude:
|
||||
- echo foo > /test
|
||||
- echo bar > /test2
|
||||
steps:
|
||||
- echo artifact3 > /test3
|
||||
- echo artifact4 > /test4
|
||||
requires:
|
||||
- category: "test"
|
||||
name: "b"
|
||||
version: "1.0"
|
8  tests/fixtures/docker_repo/cat/cat2/a/definition.yaml (vendored, new file)
@@ -0,0 +1,8 @@
|
||||
category: "test"
|
||||
name: "a"
|
||||
version: "1.0"
|
||||
requires:
|
||||
- category: "test"
|
||||
name: "b"
|
||||
version: "1.0"
|
||||
|
11  tests/fixtures/docker_repo/d/build.yaml (vendored, new file)
@@ -0,0 +1,11 @@
|
||||
prelude:
|
||||
- echo foo > /test
|
||||
- echo bar > /test2
|
||||
steps:
|
||||
- echo s > /d
|
||||
- echo dd > /dd
|
||||
requires:
|
||||
- category: "test"
|
||||
name: "c"
|
||||
version: "1.0"
|
||||
|
3  tests/fixtures/docker_repo/d/definition.yaml (vendored, new file)
@@ -0,0 +1,3 @@
|
||||
category: "test"
|
||||
name: "d"
|
||||
version: "1.0"
|
3  tests/fixtures/docker_repo/interpolated/build.yaml (vendored, new file)
@@ -0,0 +1,3 @@
|
||||
steps:
|
||||
- echo s > /interpolated-{{.Values.foo}}-{{.Values.extra}}
|
||||
image: "alpine"
|
4  tests/fixtures/docker_repo/interpolated/definition.yaml (vendored, new file)
@@ -0,0 +1,4 @@
|
||||
category: "test"
|
||||
name: "interpolated"
|
||||
version: "1.0+2"
|
||||
foo: "bar"
|
3  tests/fixtures/docker_repo/z/build.yaml (vendored, new file)
@@ -0,0 +1,3 @@
|
||||
steps:
|
||||
- echo s > /z
|
||||
image: "alpine"
|
3  tests/fixtures/docker_repo/z/definition.yaml (vendored, new file)
@@ -0,0 +1,3 @@
|
||||
category: "test"
|
||||
name: "z"
|
||||
version: "1.0+2"
|
11  tests/fixtures/perms/pkgA/0.1/build.yaml (vendored, new file)
@@ -0,0 +1,11 @@
|
||||
image: "alpine"
|
||||
unpack: true
|
||||
includes:
|
||||
- /foo
|
||||
- /foo/bar
|
||||
- /foo/bar/.keep
|
||||
steps:
|
||||
- mkdir -p /foo/bar
|
||||
- touch /foo/bar/.keep
|
||||
- chown 100:100 /foo/bar
|
||||
- chown 101:101 /foo/bar/.keep
|
3  tests/fixtures/perms/pkgA/0.1/definition.yaml (vendored, new file)
@@ -0,0 +1,3 @@
|
||||
category: "test"
|
||||
name: "perms"
|
||||
version: "0.1"
|
7  tests/fixtures/perms/pkgB/0.1/build.yaml (vendored, new file)
@@ -0,0 +1,7 @@
|
||||
image: "alpine"
|
||||
|
||||
steps:
|
||||
- mkdir -p /foo/baz
|
||||
- touch /foo/baz/.keep
|
||||
- chown 100:100 /foo/baz
|
||||
- chown 101:101 /foo/baz/.keep
|
3  tests/fixtures/perms/pkgB/0.1/definition.yaml (vendored, new file)
@@ -0,0 +1,3 @@
|
||||
category: "test"
|
||||
name: "perms2"
|
||||
version: "0.1"
|
@@ -3,27 +3,41 @@
|
||||
export LUET_NOLOCK=true
|
||||
|
||||
oneTimeSetUp() {
|
||||
export tmpdir="$(mktemp -d)"
|
||||
export tmpdir="$(mktemp -d)"
|
||||
docker images --filter='reference=luet/cache' --format='{{.Repository}}:{{.Tag}}' | xargs -r docker rmi
|
||||
}
|
||||
|
||||
oneTimeTearDown() {
|
||||
rm -rf "$tmpdir"
|
||||
docker images --filter='reference=luet/cache' --format='{{.Repository}}:{{.Tag}}' | xargs -r docker rmi
|
||||
}
|
||||
|
||||
testBuild() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
cat <<EOF > $tmpdir/default.yaml
|
||||
extra: "bar"
|
||||
foo: "baz"
|
||||
EOF
|
||||
mkdir $tmpdir/testbuild
|
||||
luet build --tree "$ROOT_DIR/tests/fixtures/buildableseed" --destination $tmpdir/testbuild --compression zstd test/c@1.0 > /dev/null
|
||||
luet build --tree "$ROOT_DIR/tests/fixtures/docker_repo" \
|
||||
--destination $tmpdir/testbuild --concurrency 1 \
|
||||
--image-repository "${TEST_DOCKER_IMAGE}-cache" --push \
|
||||
--compression zstd --values $tmpdir/default.yaml \
|
||||
test/c@1.0 test/z test/interpolated #> /dev/null
|
||||
buildst=$?
|
||||
assertEquals 'builds successfully' "$buildst" "0"
|
||||
assertTrue 'create package dep B' "[ -e '$tmpdir/testbuild/b-test-1.0.package.tar.zst' ]"
|
||||
assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
|
||||
assertTrue 'create package z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]"
|
||||
assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]"
|
||||
}
|
||||
|
||||
testRepo() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet create-repo --tree "$ROOT_DIR/tests/fixtures/buildableseed" \
|
||||
createres=$(luet create-repo --tree "$ROOT_DIR/tests/fixtures/docker_repo" \
|
||||
--output "${TEST_DOCKER_IMAGE}" \
|
||||
--packages $tmpdir/testbuild \
|
||||
--name "test" \
|
||||
@@ -33,10 +47,11 @@ testRepo() {
|
||||
--tree-filename foo.tar \
|
||||
--meta-filename repository.meta.tar \
|
||||
--meta-compression zstd \
|
||||
--type docker --push-images --force-push
|
||||
--type docker --push-images --force-push)
|
||||
|
||||
createst=$?
|
||||
assertEquals 'create repo successfully' "$createst" "0"
|
||||
assertContains 'contains image push' "$createres" 'Pushed image: quay.io/mocaccinoos/integration-test:z-test-1.0-2'
|
||||
}
|
||||
|
||||
testConfig() {
|
||||
@@ -65,10 +80,12 @@ testInstall() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet install -y --config $tmpdir/luet.yaml test/c@1.0
|
||||
luet install -y --config $tmpdir/luet.yaml test/c@1.0 test/z test/interpolated
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
|
||||
assertTrue 'package Z installed' "[ -e '$tmpdir/testrootfs/z' ]"
|
||||
assertTrue 'package interpolated installed' "[ -e '$tmpdir/testrootfs/interpolated-baz-bar' ]"
|
||||
}
|
||||
|
||||
testReInstall() {
|
||||
@@ -85,10 +102,11 @@ testUnInstall() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet uninstall -y --config $tmpdir/luet.yaml test/c@1.0
|
||||
luet uninstall -y --config $tmpdir/luet.yaml test/c@1.0 test/z
|
||||
installst=$?
|
||||
assertEquals 'uninstall test successfully' "$installst" "0"
|
||||
assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
|
||||
assertTrue 'package Z uninstalled' "[ ! -e '$tmpdir/testrootfs/z' ]"
|
||||
}
|
||||
|
||||
testInstallAgain() {
|
||||
@@ -96,11 +114,13 @@ testInstallAgain() {
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
|
||||
output=$(luet install -y --config $tmpdir/luet.yaml test/c@1.0)
|
||||
output=$(luet install -y --config $tmpdir/luet.yaml test/c@1.0 test/z)
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertNotContains 'contains warning' "$output" 'No packages to install'
|
||||
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
|
||||
assertTrue 'package Z installed' "[ -e '$tmpdir/testrootfs/z' ]"
|
||||
assertTrue 'package Z in cache' "[ -e '$tmpdir/testrootfs/packages/z-test-1.0+2.package.tar.zst' ]"
|
||||
assertTrue 'package in cache' "[ -e '$tmpdir/testrootfs/packages/c-test-1.0.package.tar.zst' ]"
|
||||
}
|
||||
|
||||
|
@@ -95,13 +95,6 @@ testInstall() {
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
|
||||
luet install -y --config $tmpdir/luet.yaml test/a@1.1
|
||||
assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertTrue 'package keeps old A' "[ -e '$tmpdir/testrootfs/testaa' ]"
|
||||
assertTrue 'package new A was not installed' "[ ! -e '$tmpdir/testrootfs/testlatest' ]"
|
||||
|
||||
luet install -y --config $tmpdir/luet.yaml test/c@1.0
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
|
@@ -84,26 +84,11 @@ EOF
|
||||
}
|
||||
|
||||
testInstall() {
|
||||
luet install -y --config $tmpdir/luet.yaml test/b@1.0
|
||||
luet install -y --config $tmpdir/luet.yaml test/b@1.0 test/a@1.0 test/c@1.0
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertTrue 'package installed B' "[ -e '$tmpdir/testrootfs/test5' ]"
|
||||
|
||||
luet install -y --config $tmpdir/luet.yaml test/a@1.0
|
||||
assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
|
||||
luet install -y --config $tmpdir/luet.yaml test/a@1.1
|
||||
assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertTrue 'package keeps old A' "[ -e '$tmpdir/testrootfs/testaa' ]"
|
||||
assertTrue 'package new A was not installed' "[ ! -e '$tmpdir/testrootfs/testlatest' ]"
|
||||
|
||||
luet install -y --config $tmpdir/luet.yaml test/c@1.0
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertTrue 'package installed C' "[ -e '$tmpdir/testrootfs/c' ]"
|
||||
}
|
||||
|
||||
|
@@ -85,26 +85,11 @@ EOF
|
||||
}
|
||||
|
||||
testInstall() {
|
||||
luet install -y --config $tmpdir/luet.yaml test/b@1.0
|
||||
luet install -y --config $tmpdir/luet.yaml test/b@1.0 test/a@1.0 test/c@1.0
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertTrue 'package installed B' "[ -e '$tmpdir/testrootfs/test5' ]"
|
||||
|
||||
luet install -y --config $tmpdir/luet.yaml test/a@1.0
|
||||
assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
|
||||
luet install -y --config $tmpdir/luet.yaml test/a@1.1
|
||||
assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertTrue 'package keeps old A' "[ -e '$tmpdir/testrootfs/testaa' ]"
|
||||
assertTrue 'package new A was not installed' "[ ! -e '$tmpdir/testrootfs/testlatest' ]"
|
||||
|
||||
luet install -y --config $tmpdir/luet.yaml test/c@1.0
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertTrue 'package installed C' "[ -e '$tmpdir/testrootfs/c' ]"
|
||||
}
|
||||
|
||||
|
107  tests/integration/12_config_protect_samefile.sh (new executable file)
@@ -0,0 +1,107 @@
|
||||
#!/bin/bash
|
||||
|
||||
export LUET_NOLOCK=true
|
||||
|
||||
oneTimeSetUp() {
|
||||
export tmpdir="$(mktemp -d)"
|
||||
}
|
||||
|
||||
oneTimeTearDown() {
|
||||
rm -rf "$tmpdir"
|
||||
}
|
||||
|
||||
testBuild() {
|
||||
mkdir $tmpdir/testrootfs/testbuild -p
|
||||
luet build --tree "$ROOT_DIR/tests/fixtures/config_protect" \
|
||||
--destination $tmpdir/testrootfs/testbuild --compression gzip test/a
|
||||
buildst=$?
|
||||
assertEquals 'builds successfully' "$buildst" "0"
|
||||
assertTrue 'create package' "[ -e '$tmpdir/testrootfs/testbuild/a-test-1.0.package.tar.gz' ]"
|
||||
}
|
||||
|
||||
testRepo() {
|
||||
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
|
||||
luet create-repo --tree "$ROOT_DIR/tests/fixtures/config_protect" \
|
||||
--output $tmpdir/testrootfs/testbuild \
|
||||
--packages $tmpdir/testrootfs/testbuild \
|
||||
--name "test" \
|
||||
--descr "Test Repo" \
|
||||
--urls $tmpdir/testrootfs \
|
||||
--type disk > /dev/null
|
||||
|
||||
createst=$?
|
||||
assertEquals 'create repo successfully' "$createst" "0"
|
||||
assertTrue 'create repository' "[ -e '$tmpdir/testrootfs/testbuild/repository.yaml' ]"
|
||||
}
|
||||
|
||||
testConfig() {
|
||||
|
||||
mkdir $tmpdir/testrootfs/etc/luet/config.protect.d -p
|
||||
|
||||
cat <<EOF > $tmpdir/testrootfs/etc/luet/config.protect.d/conf1.yml
|
||||
name: "protect1"
|
||||
dirs:
|
||||
- /etc/
|
||||
EOF
|
||||
|
||||
cat <<EOF > $tmpdir/luet.yaml
|
||||
general:
|
||||
debug: true
|
||||
system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_protect_confdir:
|
||||
- /etc/luet/config.protect.d
|
||||
config_from_host: false
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
enable: true
|
||||
urls:
|
||||
- "/testbuild"
|
||||
EOF
|
||||
luet config --config $tmpdir/luet.yaml
|
||||
res=$?
|
||||
assertEquals 'config test successfully' "$res" "0"
|
||||
}
|
||||
|
||||
|
||||
|
||||
testInstall() {
|
||||
|
||||
# Simulate previous installation
|
||||
mkdir $tmpdir/testrootfs/etc/a -p
|
||||
echo config > $tmpdir/testrootfs/etc/a/conf
|
||||
|
||||
luet install -y --config $tmpdir/luet.yaml test/a
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
|
||||
|
||||
# Simulate config protect
|
||||
assertTrue 'package A installed' "[ -e '$tmpdir/testrootfs/c' ]"
|
||||
assertTrue 'config protect not created, file is the same' "[ ! -e '$tmpdir/testrootfs/etc/a/._cfg0001_conf' ]"
|
||||
assertEquals 'config protect content' "$(cat $tmpdir/testrootfs/etc/a/conf)" "config"
|
||||
}
|
||||
|
||||
|
||||
testUnInstall() {
|
||||
luet uninstall -y --full --config $tmpdir/luet.yaml test/a
|
||||
installst=$?
|
||||
assertEquals 'uninstall test successfully' "$installst" "0"
|
||||
assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
|
||||
assertTrue 'config protect maintains the protected files' "[ -e '$tmpdir/testrootfs/etc/a/conf' ]"
|
||||
}
|
||||
|
||||
|
||||
testCleanup() {
|
||||
luet cleanup --config $tmpdir/luet.yaml
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertTrue 'package installed' "[ ! -e '$tmpdir/testrootfs/packages/a-test-1.0.package.tar.gz' ]"
|
||||
}
|
||||
|
||||
# Load shUnit2.
|
||||
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
|
||||
|
79  tests/integration/16_perms.sh (new executable file)
@@ -0,0 +1,79 @@
|
||||
#!/bin/bash
|
||||
|
||||
export LUET_NOLOCK=true
|
||||
|
||||
oneTimeSetUp() {
|
||||
export tmpdir="$(mktemp -d)"
|
||||
}
|
||||
|
||||
oneTimeTearDown() {
|
||||
rm -rf "$tmpdir"
|
||||
}
|
||||
|
||||
testBuild() {
|
||||
[ "$LUET_BACKEND" == "img" ] && startSkipping
|
||||
mkdir $tmpdir/testbuild
|
||||
luet build -d --tree "$ROOT_DIR/tests/fixtures/perms" --same-owner=true --destination $tmpdir/testbuild --compression gzip --full
|
||||
buildst=$?
|
||||
assertTrue 'create package perms 0.1' "[ -e '$tmpdir/testbuild/perms-test-0.1.package.tar.gz' ]"
|
||||
assertEquals 'builds successfully' "$buildst" "0"
|
||||
}
|
||||
|
||||
testRepo() {
|
||||
[ "$LUET_BACKEND" == "img" ] && startSkipping
|
||||
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
|
||||
luet create-repo --tree "$ROOT_DIR/tests/fixtures/perms" \
|
||||
--output $tmpdir/testbuild \
|
||||
--packages $tmpdir/testbuild \
|
||||
--name "test" \
|
||||
--descr "Test Repo" \
|
||||
--urls $tmpdir/testrootfs \
|
||||
--type http
|
||||
|
||||
createst=$?
|
||||
assertEquals 'create repo successfully' "$createst" "0"
|
||||
assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
|
||||
}
|
||||
|
||||
testConfig() {
|
||||
[ "$LUET_BACKEND" == "img" ] && startSkipping
|
||||
mkdir $tmpdir/testrootfs
|
||||
cat <<EOF > $tmpdir/luet.yaml
|
||||
general:
|
||||
debug: true
|
||||
system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
enable: true
|
||||
urls:
|
||||
- "$tmpdir/testbuild"
|
||||
EOF
|
||||
luet config --config $tmpdir/luet.yaml
|
||||
res=$?
|
||||
assertEquals 'config test successfully' "$res" "0"
|
||||
}
|
||||
|
||||
testInstall() {
|
||||
[ "$LUET_BACKEND" == "img" ] && startSkipping
|
||||
$ROOT_DIR/tests/integration/bin/luet install -y --config $tmpdir/luet.yaml test/perms@0.1 test/perms2@0.1
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
|
||||
assertTrue 'package installed perms baz' "[ -d '$tmpdir/testrootfs/foo/baz' ]"
|
||||
assertTrue 'package installed perms bar' "[ -d '$tmpdir/testrootfs/foo/bar' ]"
|
||||
|
||||
assertContains 'perms1' "$(stat -c %u:%g $tmpdir/testrootfs/foo/baz)" "100:100"
|
||||
assertContains 'perms2' "$(stat -c %u:%g $tmpdir/testrootfs/foo/bar)" "100:100"
|
||||
assertContains 'perms11' "$(stat -c %u:%g $tmpdir/testrootfs/foo/baz/.keep)" "101:101"
|
||||
assertContains 'perms22' "$(stat -c %u:%g $tmpdir/testrootfs/foo/bar/.keep)" "101:101"
|
||||
}
|
||||
|
||||
|
||||
# Load shUnit2.
|
||||
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
|
||||
|
@@ -3,11 +3,13 @@
|
||||
export LUET_NOLOCK=true
|
||||
|
||||
oneTimeSetUp() {
|
||||
export tmpdir="$(mktemp -d)"
|
||||
export tmpdir="$(mktemp -d)"
|
||||
docker images --filter='reference=luet/cache' --format='{{.Repository}}:{{.Tag}}' | xargs -r docker rmi
|
||||
}
|
||||
|
||||
oneTimeTearDown() {
|
||||
rm -rf "$tmpdir"
|
||||
docker images --filter='reference=luet/cache' --format='{{.Repository}}:{{.Tag}}' | xargs -r docker rmi
|
||||
}
|
||||
|
||||
testBuild() {
|
||||
@@ -15,7 +17,7 @@ testBuild() {
|
||||
bb: "ttt"
|
||||
EOF
|
||||
mkdir $tmpdir/testbuild
|
||||
luet build --tree "$ROOT_DIR/tests/fixtures/build_values" --values $tmpdir/default.yaml --destination $tmpdir/testbuild --compression gzip --all
|
||||
luet build --tree "$ROOT_DIR/tests/fixtures/build_values" --values $tmpdir/default.yaml --destination $tmpdir/testbuild --compression gzip distro/a distro/b test/foo distro/c
|
||||
buildst=$?
|
||||
assertEquals 'builds successfully' "$buildst" "0"
|
||||
assertTrue 'create package B' "[ -e '$tmpdir/testbuild/b-distro-0.3.package.tar.gz' ]"
|
||||
@@ -63,7 +65,7 @@ EOF
|
||||
testBuildWithNoTree() {
|
||||
mkdir $tmpdir/testbuild2
|
||||
mkdir $tmpdir/emptytree
|
||||
luet build --from-repositories --tree $tmpdir/emptytree --config $tmpdir/luet.yaml test/c --destination $tmpdir/testbuild2 --compression gzip --all
|
||||
luet build --from-repositories --tree $tmpdir/emptytree --config $tmpdir/luet.yaml distro/c --destination $tmpdir/testbuild2 --compression gzip distro/a distro/b test/foo distro/c
|
||||
buildst=$?
|
||||
assertEquals 'builds successfully' "$buildst" "0"
|
||||
assertTrue 'create package B' "[ -e '$tmpdir/testbuild2/b-distro-0.3.package.tar.gz' ]"
|
||||
@@ -187,7 +189,7 @@ foo: "sq"
|
||||
EOF
|
||||
mkdir $tmpdir/testbuild3
|
||||
mkdir $tmpdir/emptytree
|
||||
luet build --from-repositories --values $tmpdir/default.yaml --tree $tmpdir/emptytree --config $tmpdir/luet.yaml test/c --destination $tmpdir/testbuild3 --compression gzip --all
|
||||
luet build --from-repositories --values $tmpdir/default.yaml --tree $tmpdir/emptytree --config $tmpdir/luet.yaml distro/c --destination $tmpdir/testbuild3 --compression gzip distro/a distro/b test/foo
|
||||
buildst=$?
|
||||
assertEquals 'builds successfully' "$buildst" "0"
|
||||
assertTrue 'create package B' "[ -e '$tmpdir/testbuild3/b-distro-0.3.package.tar.gz' ]"
|
||||
@@ -239,7 +241,6 @@ EOF
|
||||
assertTrue 'package installed A interpolated with values' "[ -e '$tmpdir/testrootfs3/a-newinterpolation' ]"
|
||||
# Finalizers can interpolate only on package field. No extra fields are allowed at this time.
|
||||
assertTrue 'finalizer executed on A' "[ -e '$tmpdir/testrootfs3/finalize-a' ]"
|
||||
|
||||
installed=$(luet --config $tmpdir/luet2.yaml search --installed .)
|
||||
searchst=$?
|
||||
assertEquals 'search exists successfully' "$searchst" "0"
|
||||
|
@@ -3,11 +3,13 @@
|
||||
export LUET_NOLOCK=true
|
||||
|
||||
oneTimeSetUp() {
|
||||
export tmpdir="$(mktemp -d)"
|
||||
export tmpdir="$(mktemp -d)"
|
||||
docker images --filter='reference=luet/cache' --format='{{.Repository}}:{{.Tag}}' | xargs -r docker rmi
|
||||
}
|
||||
|
||||
oneTimeTearDown() {
|
||||
rm -rf "$tmpdir"
|
||||
docker images --filter='reference=luet/cache' --format='{{.Repository}}:{{.Tag}}' | xargs -r docker rmi
|
||||
}
|
||||
|
||||
testConfig() {
|
||||
@@ -39,11 +41,19 @@ testBuild() {
|
||||
|
||||
mkdir $tmpdir/testbuild
|
||||
mkdir $tmpdir/empty
|
||||
luet build --tree "$tmpdir/empty" --config $tmpdir/luet.yaml --from-repositories --destination $tmpdir/testbuild --compression zstd test/c@1.0 > /dev/null
|
||||
build_output=$(luet build --pull --tree "$tmpdir/empty" \
|
||||
--config $tmpdir/luet.yaml --concurrency 1 \
|
||||
--from-repositories --destination $tmpdir/testbuild --compression zstd test/c@1.0 test/z test/interpolated)
|
||||
buildst=$?
|
||||
echo "$build_output"
|
||||
assertEquals 'builds successfully' "$buildst" "0"
|
||||
assertTrue 'create package dep B' "[ -e '$tmpdir/testbuild/b-test-1.0.package.tar.zst' ]"
|
||||
assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
|
||||
assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]"
|
||||
assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]"
|
||||
assertContains 'Does use the upstream cache without specifying it test/c' "$build_output" "Images available for test/c-1.0 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:d620e573c81eab36a9dc5cc314e80fd7b6e04aeff26127de4225bf24fe1f8e71"
|
||||
assertContains 'Does use the upstream cache without specifying it test/z' "$build_output" "Images available for test/z-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:b0f34b0d2d271f0f2619324476b2857b3b39ca895bddc2474a741f3c8c1acbbc"
|
||||
assertContains 'Does use the upstream cache without specifying it test/interpolated' "$build_output" "Images available for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:c1f11f48113cd71d8795a06c7b49e1558bd7211d2aa88f5d79a3334f0393c64d"
|
||||
}
|
||||
|
||||
testRepo() {
|
||||
@@ -94,10 +104,12 @@ testInstall() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet install -y --config $tmpdir/luet-client.yaml test/c@1.0
|
||||
luet install -y --config $tmpdir/luet-client.yaml test/c@1.0 test/z test/interpolated
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
|
||||
assertTrue 'package Z installed' "[ -e '$tmpdir/testrootfs/z' ]"
|
||||
assertTrue 'package interpolated installed' "[ -e '$tmpdir/testrootfs/interpolated-baz-bar' ]"
|
||||
}
|
||||
|
||||
testReInstall() {
|
||||
|
162  tests/integration/29_nobuildtreeinheritvalues.sh (new executable file)
@@ -0,0 +1,162 @@
|
||||
#!/bin/bash
|
||||
|
||||
export LUET_NOLOCK=true
|
||||
|
||||
oneTimeSetUp() {
|
||||
export tmpdir="$(mktemp -d)"
|
||||
docker images --filter='reference=luet/cache' --format='{{.Repository}}:{{.Tag}}' | xargs -r docker rmi
|
||||
}
|
||||
|
||||
oneTimeTearDown() {
|
||||
rm -rf "$tmpdir"
|
||||
docker images --filter='reference=luet/cache' --format='{{.Repository}}:{{.Tag}}' | xargs -r docker rmi
|
||||
}
|
||||
|
||||
testConfig() {
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
mkdir $tmpdir/testrootfs
|
||||
cat <<EOF > $tmpdir/luet.yaml
|
||||
general:
|
||||
debug: true
|
||||
system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "docker"
|
||||
enable: true
|
||||
urls:
|
||||
- "${TEST_DOCKER_IMAGE}"
|
||||
EOF
|
||||
luet config --config $tmpdir/luet.yaml
|
||||
res=$?
|
||||
assertEquals 'config test successfully' "$res" "0"
|
||||
}
|
||||
|
||||
testBuild() {
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
cat <<EOF > $tmpdir/default.yaml
|
||||
extra: "an"
|
||||
EOF
|
||||
|
||||
mkdir $tmpdir/testbuild
|
||||
mkdir $tmpdir/empty
|
||||
|
||||
# Without --rebuild, the values file gets ignored and the upstream cache image is reused
|
||||
build_output=$(luet build --pull --tree "$tmpdir/empty" \
|
||||
--config $tmpdir/luet.yaml --values $tmpdir/default.yaml --concurrency 1 \
|
||||
--from-repositories --destination $tmpdir/testbuild --compression zstd test/c@1.0 test/z test/interpolated)
|
||||
buildst=$?
|
||||
echo "$build_output"
|
||||
assertEquals 'builds successfully' "$buildst" "0"
|
||||
assertTrue 'create package dep B' "[ -e '$tmpdir/testbuild/b-test-1.0.package.tar.zst' ]"
|
||||
assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
|
||||
assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]"
|
||||
assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]"
|
||||
assertContains 'Does use the upstream cache without specifying it' "$build_output" "Images available for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:c1f11f48113cd71d8795a06c7b49e1558bd7211d2aa88f5d79a3334f0393c64d"
|
||||
}
|
||||
|
||||
testRepo() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet create-repo \
|
||||
--output "${TEST_DOCKER_IMAGE}-2" \
|
||||
--packages $tmpdir/testbuild \
|
||||
--name "test" \
|
||||
--descr "Test Repo" \
|
||||
--urls $tmpdir/testrootfs \
|
||||
--tree-compression zstd \
|
||||
--tree-filename foo.tar \
|
||||
--tree "$tmpdir/empty" --config $tmpdir/luet.yaml --from-repositories \
|
||||
--meta-filename repository.meta.tar \
|
||||
--meta-compression zstd \
|
||||
--type docker --push-images --force-push --debug
|
||||
|
||||
createst=$?
|
||||
assertEquals 'create repo successfully' "$createst" "0"
|
||||
}
|
||||
|
||||
testConfigClient() {
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
cat <<EOF > $tmpdir/luet-client.yaml
|
||||
general:
|
||||
debug: true
|
||||
system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "docker"
|
||||
enable: true
|
||||
urls:
|
||||
- "${TEST_DOCKER_IMAGE}-2"
|
||||
EOF
|
||||
luet config --config $tmpdir/luet-client.yaml
|
||||
res=$?
|
||||
assertEquals 'config test successfully' "$res" "0"
|
||||
}
|
||||
|
||||
testInstall() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet install -y --config $tmpdir/luet-client.yaml test/c@1.0 test/z test/interpolated
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
|
||||
assertTrue 'package Z installed' "[ -e '$tmpdir/testrootfs/z' ]"
|
||||
ls -liah $tmpdir/testrootfs/
|
||||
assertTrue 'package interpolated installed' "[ -e '$tmpdir/testrootfs/interpolated-baz-bar' ]"
|
||||
}
|
||||
|
||||
testReInstall() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
output=$(luet install -y --config $tmpdir/luet-client.yaml test/c@1.0)
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertContains 'contains warning' "$output" 'No packages to install'
|
||||
}
|
||||
|
||||
testUnInstall() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet uninstall -y --config $tmpdir/luet-client.yaml test/c@1.0
|
||||
installst=$?
|
||||
assertEquals 'uninstall test successfully' "$installst" "0"
|
||||
assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
|
||||
}
|
||||
|
||||
testInstallAgain() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
|
||||
output=$(luet install -y --config $tmpdir/luet-client.yaml test/c@1.0)
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertNotContains 'contains warning' "$output" 'No packages to install'
|
||||
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
|
||||
assertTrue 'package in cache' "[ -e '$tmpdir/testrootfs/packages/c-test-1.0.package.tar.zst' ]"
|
||||
}
|
||||
|
||||
testCleanup() {
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet cleanup --config $tmpdir/luet-client.yaml
|
||||
installst=$?
|
||||
assertEquals 'cleanup test successfully' "$installst" "0"
|
||||
}
|
||||
|
||||
# Load shUnit2.
|
||||
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
|
||||
|
162
tests/integration/29_nobuildtreeinheritvalues_noignored.sh
Executable file
@@ -0,0 +1,162 @@
|
||||
#!/bin/bash
|
||||
|
||||
export LUET_NOLOCK=true
|
||||
|
||||
oneTimeSetUp() {
|
||||
export tmpdir="$(mktemp -d)"
|
||||
docker images --filter='reference=luet/cache' --format='{{.Repository}}:{{.Tag}}' | xargs -r docker rmi
|
||||
}
|
||||
|
||||
oneTimeTearDown() {
|
||||
rm -rf "$tmpdir"
|
||||
docker images --filter='reference=luet/cache' --format='{{.Repository}}:{{.Tag}}' | xargs -r docker rmi
|
||||
}
|
||||
|
||||
testConfig() {
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
mkdir $tmpdir/testrootfs
|
||||
cat <<EOF > $tmpdir/luet.yaml
|
||||
general:
|
||||
debug: true
|
||||
system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "docker"
|
||||
enable: true
|
||||
urls:
|
||||
- "${TEST_DOCKER_IMAGE}"
|
||||
EOF
|
||||
luet config --config $tmpdir/luet.yaml
|
||||
res=$?
|
||||
assertEquals 'config test successfully' "$res" "0"
|
||||
}
|
||||
|
||||
testBuild() {
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
cat <<EOF > $tmpdir/default.yaml
|
||||
extra: "an"
|
||||
EOF
|
||||
|
||||
mkdir $tmpdir/testbuild
|
||||
mkdir $tmpdir/empty
|
||||
|
||||
# With --rebuild, the package gets rebuilt with the values applied
|
||||
build_output=$(luet build --pull --rebuild --tree "$tmpdir/empty" \
|
||||
--config $tmpdir/luet.yaml --values $tmpdir/default.yaml --concurrency 1 \
|
||||
--from-repositories --destination $tmpdir/testbuild --compression zstd test/c@1.0 test/z test/interpolated)
|
||||
buildst=$?
|
||||
echo "$build_output"
|
||||
assertEquals 'builds successfully' "$buildst" "0"
|
||||
assertTrue 'create package dep B' "[ -e '$tmpdir/testbuild/b-test-1.0.package.tar.zst' ]"
|
||||
assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
|
||||
assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]"
|
||||
assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]"
|
||||
assertContains 'Does use the upstream cache without specifying it' "$build_output" "Downloading image quay.io/mocaccinoos/integration-test-cache:6490e800fe443b99328fc363529aee74bda513930fb27ce6ab814d692bba068e"
|
||||
}
|
||||
|
||||
testRepo() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet create-repo \
|
||||
--output "${TEST_DOCKER_IMAGE}-2" \
|
||||
--packages $tmpdir/testbuild \
|
||||
--name "test" \
|
||||
--descr "Test Repo" \
|
||||
--urls $tmpdir/testrootfs \
|
||||
--tree-compression zstd \
|
||||
--tree-filename foo.tar \
|
||||
--tree "$tmpdir/empty" --config $tmpdir/luet.yaml --from-repositories \
|
||||
--meta-filename repository.meta.tar \
|
||||
--meta-compression zstd \
|
||||
--type docker --push-images --force-push --debug
|
||||
|
||||
createst=$?
|
||||
assertEquals 'create repo successfully' "$createst" "0"
|
||||
}
|
||||
|
||||
testConfigClient() {
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
cat <<EOF > $tmpdir/luet-client.yaml
|
||||
general:
|
||||
debug: true
|
||||
system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "docker"
|
||||
enable: true
|
||||
urls:
|
||||
- "${TEST_DOCKER_IMAGE}-2"
|
||||
EOF
|
||||
luet config --config $tmpdir/luet-client.yaml
|
||||
res=$?
|
||||
assertEquals 'config test successfully' "$res" "0"
|
||||
}
|
||||
|
||||
testInstall() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet install -y --config $tmpdir/luet-client.yaml test/c@1.0 test/z test/interpolated
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
|
||||
assertTrue 'package Z installed' "[ -e '$tmpdir/testrootfs/z' ]"
|
||||
ls -liah $tmpdir/testrootfs/
|
||||
assertTrue 'package interpolated installed' "[ -e '$tmpdir/testrootfs/interpolated-baz-an' ]"
|
||||
}
|
||||
|
||||
testReInstall() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
output=$(luet install -y --config $tmpdir/luet-client.yaml test/c@1.0)
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertContains 'contains warning' "$output" 'No packages to install'
|
||||
}
|
||||
|
||||
testUnInstall() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet uninstall -y --config $tmpdir/luet-client.yaml test/c@1.0
|
||||
installst=$?
|
||||
assertEquals 'uninstall test successfully' "$installst" "0"
|
||||
assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
|
||||
}
|
||||
|
||||
testInstallAgain() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
|
||||
output=$(luet install -y --config $tmpdir/luet-client.yaml test/c@1.0)
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertNotContains 'contains warning' "$output" 'No packages to install'
|
||||
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
|
||||
assertTrue 'package in cache' "[ -e '$tmpdir/testrootfs/packages/c-test-1.0.package.tar.zst' ]"
|
||||
}
|
||||
|
||||
testCleanup() {
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet cleanup --config $tmpdir/luet-client.yaml
|
||||
installst=$?
|
||||
assertEquals 'cleanup test successfully' "$installst" "0"
|
||||
}
|
||||
|
||||
# Load shUnit2.
|
||||
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
|
||||
|
170
tests/integration/29_nobuildtreeinheritvalues_norebuild.sh
Executable file
@@ -0,0 +1,170 @@
|
||||
#!/bin/bash
|
||||
|
||||
export LUET_NOLOCK=true
|
||||
|
||||
oneTimeSetUp() {
|
||||
export tmpdir="$(mktemp -d)"
|
||||
docker images --filter='reference=luet/cache' --format='{{.Repository}}:{{.Tag}}' | xargs -r docker rmi
|
||||
}
|
||||
|
||||
oneTimeTearDown() {
|
||||
rm -rf "$tmpdir"
|
||||
docker images --filter='reference=luet/cache' --format='{{.Repository}}:{{.Tag}}' | xargs -r docker rmi
|
||||
}
|
||||
|
||||
testConfig() {
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
mkdir $tmpdir/testrootfs
|
||||
cat <<EOF > $tmpdir/luet.yaml
|
||||
general:
|
||||
debug: true
|
||||
system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "docker"
|
||||
enable: true
|
||||
urls:
|
||||
- "${TEST_DOCKER_IMAGE}"
|
||||
EOF
|
||||
luet config --config $tmpdir/luet.yaml
|
||||
res=$?
|
||||
assertEquals 'config test successfully' "$res" "0"
|
||||
}
|
||||
|
||||
testBuild() {
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
# This will be ignored, as no --rebuild is supplied
|
||||
cat <<EOF > $tmpdir/default.yaml
|
||||
extra: "an"
|
||||
EOF
|
||||
|
||||
mkdir $tmpdir/testbuild
|
||||
mkdir $tmpdir/empty
|
||||
cp -rf "$ROOT_DIR/tests/fixtures/docker_repo/interpolated" $tmpdir/empty/
|
||||
|
||||
cat <<EOF > $tmpdir/empty/interpolated/definition.yaml
|
||||
category: "test"
|
||||
name: "interpolated"
|
||||
version: "1.1"
|
||||
foo: "bar"
|
||||
EOF
|
||||
|
||||
build_output=$(luet build --pull --tree "$tmpdir/empty" \
|
||||
--config $tmpdir/luet.yaml --values $tmpdir/default.yaml --concurrency 1 \
|
||||
--from-repositories --destination $tmpdir/testbuild --compression zstd test/c@1.0 test/z test/interpolated)
|
||||
buildst=$?
|
||||
echo "$build_output"
|
||||
assertEquals 'builds successfully' "$buildst" "0"
|
||||
assertTrue 'create package dep B' "[ -e '$tmpdir/testbuild/b-test-1.0.package.tar.zst' ]"
|
||||
assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
|
||||
assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]"
|
||||
assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.1.package.tar.zst' ]"
|
||||
}
|
||||
|
||||
testRepo() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet create-repo \
|
||||
--output "${TEST_DOCKER_IMAGE}-2" \
|
||||
--packages $tmpdir/testbuild \
|
||||
--name "test" \
|
||||
--descr "Test Repo" \
|
||||
--urls $tmpdir/testrootfs \
|
||||
--tree-compression zstd \
|
||||
--tree-filename foo.tar \
|
||||
--tree "$tmpdir/empty" --config $tmpdir/luet.yaml --from-repositories \
|
||||
--meta-filename repository.meta.tar \
|
||||
--meta-compression zstd \
|
||||
--type docker --push-images --force-push --debug
|
||||
|
||||
createst=$?
|
||||
assertEquals 'create repo successfully' "$createst" "0"
|
||||
}
|
||||
|
||||
testConfigClient() {
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
cat <<EOF > $tmpdir/luet-client.yaml
|
||||
general:
|
||||
debug: true
|
||||
system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "docker"
|
||||
enable: true
|
||||
urls:
|
||||
- "${TEST_DOCKER_IMAGE}-2"
|
||||
EOF
|
||||
luet config --config $tmpdir/luet-client.yaml
|
||||
res=$?
|
||||
assertEquals 'config test successfully' "$res" "0"
|
||||
}
|
||||
|
||||
testInstall() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet install -y --config $tmpdir/luet-client.yaml test/c@1.0 test/z test/interpolated
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
|
||||
assertTrue 'package Z installed' "[ -e '$tmpdir/testrootfs/z' ]"
|
||||
ls -liah $tmpdir/testrootfs/
|
||||
assertTrue 'package interpolated installed' "[ -e '$tmpdir/testrootfs/interpolated-bar-an' ]"
|
||||
}
|
||||
|
||||
testReInstall() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
output=$(luet install -y --config $tmpdir/luet-client.yaml test/c@1.0)
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertContains 'contains warning' "$output" 'No packages to install'
|
||||
}
|
||||
|
||||
testUnInstall() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet uninstall -y --config $tmpdir/luet-client.yaml test/c@1.0
|
||||
installst=$?
|
||||
assertEquals 'uninstall test successfully' "$installst" "0"
|
||||
assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
|
||||
}
|
||||
|
||||
testInstallAgain() {
|
||||
# Disable tests which require a DOCKER registry
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
|
||||
output=$(luet install -y --config $tmpdir/luet-client.yaml test/c@1.0)
|
||||
installst=$?
|
||||
assertEquals 'install test successfully' "$installst" "0"
|
||||
assertNotContains 'contains warning' "$output" 'No packages to install'
|
||||
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
|
||||
assertTrue 'package in cache' "[ -e '$tmpdir/testrootfs/packages/c-test-1.0.package.tar.zst' ]"
|
||||
}
|
||||
|
||||
testCleanup() {
|
||||
[ -z "${TEST_DOCKER_IMAGE:-}" ] && startSkipping
|
||||
|
||||
luet cleanup --config $tmpdir/luet-client.yaml
|
||||
installst=$?
|
||||
assertEquals 'cleanup test successfully' "$installst" "0"
|
||||
}
|
||||
|
||||
# Load shUnit2.
|
||||
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
|
||||
|
2
vendor/github.com/codegangsta/inject/.gitignore
generated
vendored
@@ -1,2 +0,0 @@
|
||||
inject
|
||||
inject.test
|
20
vendor/github.com/codegangsta/inject/LICENSE
generated
vendored
@@ -1,20 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Jeremy Saenz
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
92
vendor/github.com/codegangsta/inject/README.md
generated
vendored
@@ -1,92 +0,0 @@
|
||||
# inject
|
||||
--
|
||||
import "github.com/codegangsta/inject"
|
||||
|
||||
Package inject provides utilities for mapping and injecting dependencies in
|
||||
various ways.
|
||||
|
||||
Language Translations:
|
||||
* [简体中文](translations/README_zh_cn.md)
|
||||
|
||||
## Usage
|
||||
|
||||
#### func InterfaceOf
|
||||
|
||||
```go
|
||||
func InterfaceOf(value interface{}) reflect.Type
|
||||
```
|
||||
InterfaceOf dereferences a pointer to an Interface type. It panics if value is
|
||||
not an pointer to an interface.
|
||||
|
||||
#### type Applicator
|
||||
|
||||
```go
|
||||
type Applicator interface {
|
||||
// Maps dependencies in the Type map to each field in the struct
|
||||
// that is tagged with 'inject'. Returns an error if the injection
|
||||
// fails.
|
||||
Apply(interface{}) error
|
||||
}
|
||||
```
|
||||
|
||||
Applicator represents an interface for mapping dependencies to a struct.
|
||||
|
||||
#### type Injector
|
||||
|
||||
```go
|
||||
type Injector interface {
|
||||
Applicator
|
||||
Invoker
|
||||
TypeMapper
|
||||
// SetParent sets the parent of the injector. If the injector cannot find a
|
||||
// dependency in its Type map it will check its parent before returning an
|
||||
// error.
|
||||
SetParent(Injector)
|
||||
}
|
||||
```
|
||||
|
||||
Injector represents an interface for mapping and injecting dependencies into
|
||||
structs and function arguments.
|
||||
|
||||
#### func New
|
||||
|
||||
```go
|
||||
func New() Injector
|
||||
```
|
||||
New returns a new Injector.
|
||||
|
||||
#### type Invoker
|
||||
|
||||
```go
|
||||
type Invoker interface {
|
||||
// Invoke attempts to call the interface{} provided as a function,
|
||||
// providing dependencies for function arguments based on Type. Returns
|
||||
// a slice of reflect.Value representing the returned values of the function.
|
||||
// Returns an error if the injection fails.
|
||||
Invoke(interface{}) ([]reflect.Value, error)
|
||||
}
|
||||
```
|
||||
|
||||
Invoker represents an interface for calling functions via reflection.
|
||||
|
||||
#### type TypeMapper
|
||||
|
||||
```go
|
||||
type TypeMapper interface {
|
||||
// Maps the interface{} value based on its immediate type from reflect.TypeOf.
|
||||
Map(interface{}) TypeMapper
|
||||
// Maps the interface{} value based on the pointer of an Interface provided.
|
||||
// This is really only useful for mapping a value as an interface, as interfaces
|
||||
// cannot at this time be referenced directly without a pointer.
|
||||
MapTo(interface{}, interface{}) TypeMapper
|
||||
// Provides a possibility to directly insert a mapping based on type and value.
|
||||
// This makes it possible to directly map type arguments not possible to instantiate
|
||||
// with reflect like unidirectional channels.
|
||||
Set(reflect.Type, reflect.Value) TypeMapper
|
||||
// Returns the Value that is mapped to the current type. Returns a zeroed Value if
|
||||
// the Type has not been mapped.
|
||||
Get(reflect.Type) reflect.Value
|
||||
}
|
||||
```
|
||||
|
||||
TypeMapper represents an interface for mapping interface{} values based on type.
|
187
vendor/github.com/codegangsta/inject/inject.go
generated
vendored
@@ -1,187 +0,0 @@
|
||||
// Package inject provides utilities for mapping and injecting dependencies in various ways.
|
||||
package inject
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// Injector represents an interface for mapping and injecting dependencies into structs
|
||||
// and function arguments.
|
||||
type Injector interface {
|
||||
Applicator
|
||||
Invoker
|
||||
TypeMapper
|
||||
// SetParent sets the parent of the injector. If the injector cannot find a
|
||||
// dependency in its Type map it will check its parent before returning an
|
||||
// error.
|
||||
SetParent(Injector)
|
||||
}
|
||||
|
||||
// Applicator represents an interface for mapping dependencies to a struct.
|
||||
type Applicator interface {
|
||||
// Maps dependencies in the Type map to each field in the struct
|
||||
// that is tagged with 'inject'. Returns an error if the injection
|
||||
// fails.
|
||||
Apply(interface{}) error
|
||||
}
|
||||
|
||||
// Invoker represents an interface for calling functions via reflection.
|
||||
type Invoker interface {
|
||||
// Invoke attempts to call the interface{} provided as a function,
|
||||
// providing dependencies for function arguments based on Type. Returns
|
||||
// a slice of reflect.Value representing the returned values of the function.
|
||||
// Returns an error if the injection fails.
|
||||
Invoke(interface{}) ([]reflect.Value, error)
|
||||
}
|
||||
|
||||
// TypeMapper represents an interface for mapping interface{} values based on type.
|
||||
type TypeMapper interface {
|
||||
// Maps the interface{} value based on its immediate type from reflect.TypeOf.
|
||||
Map(interface{}) TypeMapper
|
||||
// Maps the interface{} value based on the pointer of an Interface provided.
|
||||
// This is really only useful for mapping a value as an interface, as interfaces
|
||||
// cannot at this time be referenced directly without a pointer.
|
||||
MapTo(interface{}, interface{}) TypeMapper
|
||||
// Provides a possibility to directly insert a mapping based on type and value.
|
||||
// This makes it possible to directly map type arguments not possible to instantiate
|
||||
// with reflect like unidirectional channels.
|
||||
Set(reflect.Type, reflect.Value) TypeMapper
|
||||
// Returns the Value that is mapped to the current type. Returns a zeroed Value if
|
||||
// the Type has not been mapped.
|
||||
Get(reflect.Type) reflect.Value
|
||||
}
|
||||
|
||||
type injector struct {
|
||||
values map[reflect.Type]reflect.Value
|
||||
parent Injector
|
||||
}
|
||||
|
||||
// InterfaceOf dereferences a pointer to an Interface type.
|
||||
// It panics if value is not an pointer to an interface.
|
||||
func InterfaceOf(value interface{}) reflect.Type {
|
||||
t := reflect.TypeOf(value)
|
||||
|
||||
for t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
if t.Kind() != reflect.Interface {
|
||||
panic("Called inject.InterfaceOf with a value that is not a pointer to an interface. (*MyInterface)(nil)")
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
// New returns a new Injector.
|
||||
func New() Injector {
|
||||
return &injector{
|
||||
values: make(map[reflect.Type]reflect.Value),
|
||||
}
|
||||
}
|
||||
|
||||
// Invoke attempts to call the interface{} provided as a function,
|
||||
// providing dependencies for function arguments based on Type.
|
||||
// Returns a slice of reflect.Value representing the returned values of the function.
|
||||
// Returns an error if the injection fails.
|
||||
// It panics if f is not a function
|
||||
func (inj *injector) Invoke(f interface{}) ([]reflect.Value, error) {
|
||||
t := reflect.TypeOf(f)
|
||||
|
||||
var in = make([]reflect.Value, t.NumIn()) //Panic if t is not kind of Func
|
||||
for i := 0; i < t.NumIn(); i++ {
|
||||
argType := t.In(i)
|
||||
val := inj.Get(argType)
|
||||
if !val.IsValid() {
|
||||
return nil, fmt.Errorf("Value not found for type %v", argType)
|
||||
}
|
||||
|
||||
in[i] = val
|
||||
}
|
||||
|
||||
return reflect.ValueOf(f).Call(in), nil
|
||||
}
|
||||
|
||||
// Maps dependencies in the Type map to each field in the struct
|
||||
// that is tagged with 'inject'.
|
||||
// Returns an error if the injection fails.
|
||||
func (inj *injector) Apply(val interface{}) error {
|
||||
v := reflect.ValueOf(val)
|
||||
|
||||
for v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
if v.Kind() != reflect.Struct {
|
||||
return nil // Should not panic here ?
|
||||
}
|
||||
|
||||
t := v.Type()
|
||||
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
f := v.Field(i)
|
||||
structField := t.Field(i)
|
||||
if f.CanSet() && (structField.Tag == "inject" || structField.Tag.Get("inject") != "") {
|
||||
ft := f.Type()
|
||||
v := inj.Get(ft)
|
||||
if !v.IsValid() {
|
||||
return fmt.Errorf("Value not found for type %v", ft)
|
||||
}
|
||||
|
||||
f.Set(v)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Maps the concrete value of val to its dynamic type using reflect.TypeOf,
|
||||
// It returns the TypeMapper registered in.
|
||||
func (i *injector) Map(val interface{}) TypeMapper {
|
||||
i.values[reflect.TypeOf(val)] = reflect.ValueOf(val)
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *injector) MapTo(val interface{}, ifacePtr interface{}) TypeMapper {
|
||||
i.values[InterfaceOf(ifacePtr)] = reflect.ValueOf(val)
|
||||
return i
|
||||
}
|
||||
|
||||
// Maps the given reflect.Type to the given reflect.Value and returns
|
||||
// the Typemapper the mapping has been registered in.
|
||||
func (i *injector) Set(typ reflect.Type, val reflect.Value) TypeMapper {
|
||||
i.values[typ] = val
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *injector) Get(t reflect.Type) reflect.Value {
|
||||
val := i.values[t]
|
||||
|
||||
if val.IsValid() {
|
||||
return val
|
||||
}
|
||||
|
||||
// no concrete types found, try to find implementors
|
||||
// if t is an interface
|
||||
if t.Kind() == reflect.Interface {
|
||||
for k, v := range i.values {
|
||||
if k.Implements(t) {
|
||||
val = v
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Still no type found, try to look it up on the parent
|
||||
if !val.IsValid() && i.parent != nil {
|
||||
val = i.parent.Get(t)
|
||||
}
|
||||
|
||||
return val
|
||||
|
||||
}
|
||||
|
||||
func (i *injector) SetParent(parent Injector) {
|
||||
i.parent = parent
|
||||
}
|
3
vendor/github.com/codegangsta/inject/update_readme.sh
generated
vendored
@@ -1,3 +0,0 @@
|
||||
#!/bin/bash
|
||||
go get github.com/robertkrimen/godocdown/godocdown
|
||||
godocdown > README.md
|
27
vendor/github.com/mudler/go-pluggable/README.md
generated
vendored
@@ -1,6 +1,7 @@
|
||||
# go-pluggable
|
||||
# :bento: go-pluggable
|
||||
[](https://pkg.go.dev/github.com/mudler/go-pluggable) [](https://goreportcard.com/report/github.com/mudler/go-pluggable) [](https://github.com/mudler/go-pluggable/actions?query=workflow%3ATest)
|
||||
|
||||
light Bus-event driven plugin library for Golang.
|
||||
:bento: *go-pluggable* is a light Bus-event driven plugin library for Golang.
|
||||
|
||||
`go-pluggable` implements the event/sub pattern to extend your Golang project with external binary plugins that can be written in any language.
|
||||
|
||||
@@ -24,8 +25,24 @@ func main() {
|
||||
m.Autoload("test", temp)
|
||||
m.Register()
|
||||
|
||||
// ...
|
||||
m.Publish(myEv, map[string]string{"foo": "bar"}) // test-foo, will receive our data as json payload
|
||||
// Optionally process plugin results response
|
||||
// The plugin has to return as output a JSON object on stdout in the format { "state": "somestate", "data": "some data", "error": "some error" }
|
||||
// e.g. with jq:
|
||||
// jq --arg key0 'state' \
|
||||
// --arg value0 '' \
|
||||
// --arg key1 'data' \
|
||||
// --arg value1 "" \
|
||||
// --arg key2 'error' \
|
||||
// --arg value2 '' \
|
||||
// '. | .[$key0]=$value0 | .[$key1]=$value1 | .[$key2]=$value2' \
|
||||
// <<<'{}'
|
||||
m.Response(myEv, func(p *pluggable.Plugin, r *pluggable.EventResponse) { ... })
|
||||
|
||||
// Emit events, they are encoded and passed as JSON payloads to the plugins.
|
||||
// In our case, test-foo will receive the map as JSON
|
||||
m.Publish(myEv, map[string]string{"foo": "bar"})
|
||||
|
||||
|
||||
}
|
||||
|
||||
```
|
||||
```
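The README hunk above spells out the contract an external plugin must honour: the host invokes the plugin binary with the event name and a JSON payload as arguments (see the `exec.Command(p.Executable, string(e.Name), k)` call in plugin.go further below) and expects a `{"state","data","error"}` JSON object on stdout. As a rough illustration only — not part of this changeset — a minimal plugin binary in Go could look like this sketch; the `response` struct and the argument handling are assumptions derived from the README and plugin.go:

```go
// Minimal sketch of an external go-pluggable plugin binary (assumption:
// argv[1] is the event name, argv[2] the JSON-encoded event, and the host
// reads a {"state","data","error"} JSON object from stdout).
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type response struct {
	State string `json:"state"`
	Data  string `json:"data"`
	Error string `json:"error"`
}

func main() {
	if len(os.Args) < 3 {
		fmt.Println(`{"state":"","data":"","error":"missing event arguments"}`)
		return
	}
	eventName, payload := os.Args[1], os.Args[2]

	// Echo the event back so the host's Response() listeners can inspect it.
	out, err := json.Marshal(response{State: "received:" + eventName, Data: payload})
	if err != nil {
		fmt.Printf(`{"state":"","data":"","error":%q}`, err.Error())
		return
	}
	fmt.Println(string(out))
}
```

Installed in the search path with the expected prefix (e.g. as `test-foo`), such a binary would be picked up by `Autoload("test", ...)` and its output surfaced to the `Response` listeners, as the README snippet above describes.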
|
||||
|
73
vendor/github.com/mudler/go-pluggable/bus.go
generated
vendored
@@ -1,73 +0,0 @@
|
||||
// Copyright © 2020 Ettore Di Giacinto <mudler@mocaccino.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package pluggable
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/chuckpreslar/emission"
|
||||
"github.com/codegangsta/inject"
|
||||
)
|
||||
|
||||
// Bus represent the bus event system
|
||||
type Bus struct {
|
||||
inject.Injector
|
||||
emission.Emitter
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// NewBus returns a new Bus instance
|
||||
func NewBus() *Bus {
|
||||
return &Bus{
|
||||
Injector: inject.New(),
|
||||
Emitter: *emission.NewEmitter(),
|
||||
}
|
||||
}
|
||||
|
||||
// Listen Binds a callback to an event, mapping the arguments on a global level
|
||||
func (a *Bus) Listen(event EventType, listener interface{}) *Bus {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
a.On(string(event), func() { a.Invoke(listener) })
|
||||
return a
|
||||
}
|
||||
|
||||
// Publish publishes an event, it does accept only the event as argument, since
|
||||
// the callback will have access to the service mapped by the injector
|
||||
func (a *Bus) Publish(e *Event) *Bus {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
a.Map(e)
|
||||
a.Emit(string(e.Name))
|
||||
return a
|
||||
}
|
||||
|
||||
// OnlyOnce Binds a callback to an event, mapping the arguments on a global level
|
||||
// It is fired only once.
|
||||
func (a *Bus) OnlyOnce(event EventType, listener interface{}) *Bus {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
a.Once(string(event), func() { a.Invoke(listener) })
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Bus) propagateEvent(p Plugin) func(e *Event) {
|
||||
return func(e *Event) {
|
||||
resp, _ := p.Run(*e)
|
||||
a.Map(&resp)
|
||||
a.Emit(p.Name)
|
||||
}
|
||||
}
|
18
vendor/github.com/mudler/go-pluggable/events.go
generated
vendored
@@ -15,7 +15,10 @@
|
||||
|
||||
package pluggable
|
||||
|
||||
import "encoding/json"
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// EventType describes an event type
|
||||
type EventType string
|
||||
@@ -26,6 +29,7 @@ type EventType string
|
||||
type Event struct {
|
||||
Name EventType `json:"name"`
|
||||
Data string `json:"data"`
|
||||
File string `json:"file"` // If Data >> 10K write content to file instead
|
||||
}
|
||||
|
||||
// EventResponse describes the event response structure
|
||||
@@ -42,6 +46,16 @@ func (e Event) JSON() (string, error) {
|
||||
return string(dat), err
|
||||
}
|
||||
|
||||
// Copy returns a copy of Event
|
||||
func (e Event) Copy() *Event {
|
||||
copy := &e
|
||||
return copy
|
||||
}
|
||||
|
||||
func (e Event) ResponseEventName(s string) EventType {
|
||||
return EventType(fmt.Sprintf("%s-%s", e.Name, s))
|
||||
}
|
||||
|
||||
// Unmarshal decodes the JSON payload into the given parameter
|
||||
func (r EventResponse) Unmarshal(i interface{}) error {
|
||||
return json.Unmarshal([]byte(r.Data), i)
|
||||
@@ -52,7 +66,7 @@ func (r EventResponse) Errored() bool {
|
||||
return len(r.Error) != 0
|
||||
}
|
||||
|
||||
// NewEvent retuns a new event which can be used for publishing
|
||||
// NewEvent returns a new event which can be used for publishing
|
||||
// the obj gets automatically serialized in json.
|
||||
func NewEvent(name EventType, obj interface{}) (*Event, error) {
|
||||
dat, err := json.Marshal(obj)
|
||||
|
1
vendor/github.com/mudler/go-pluggable/go.mod
generated
vendored
@@ -4,7 +4,6 @@ go 1.14
|
||||
|
||||
require (
|
||||
github.com/chuckpreslar/emission v0.0.0-20170206194824-a7ddd980baf9
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0
|
||||
github.com/onsi/ginkgo v1.14.2
|
||||
github.com/onsi/gomega v1.10.3
|
||||
github.com/pkg/errors v0.9.1
|
||||
|
2
vendor/github.com/mudler/go-pluggable/go.sum
generated
vendored
@@ -1,7 +1,5 @@
|
||||
github.com/chuckpreslar/emission v0.0.0-20170206194824-a7ddd980baf9 h1:xz6Nv3zcwO2Lila35hcb0QloCQsc38Al13RNEzWRpX4=
|
||||
github.com/chuckpreslar/emission v0.0.0-20170206194824-a7ddd980baf9/go.mod h1:2wSM9zJkl1UQEFZgSd68NfCgRz1VL1jzy/RjCg+ULrs=
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
|
40
vendor/github.com/mudler/go-pluggable/manager.go
generated
vendored
@@ -21,6 +21,7 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/chuckpreslar/emission"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -29,14 +30,14 @@ import (
|
||||
type Manager struct {
|
||||
Plugins []Plugin
|
||||
Events []EventType
|
||||
Bus *Bus
|
||||
Bus *emission.Emitter
|
||||
}
|
||||
|
||||
// NewManager returns a manager instance with a new bus and
|
||||
func NewManager(events []EventType) *Manager {
|
||||
return &Manager{
|
||||
Events: events,
|
||||
Bus: NewBus(),
|
||||
Bus: emission.NewEmitter(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -47,19 +48,41 @@ func (m *Manager) Register() *Manager {
|
||||
}
|
||||
|
||||
// Publish is a wrapper around NewEvent and the Manager internal Bus publishing system
|
||||
// It accepts optionally a list of functions that are called with the plugin result (only once)
|
||||
func (m *Manager) Publish(event EventType, obj interface{}) (*Manager, error) {
|
||||
ev, err := NewEvent(event, obj)
|
||||
if err == nil && ev != nil {
|
||||
m.Bus.Publish(ev)
|
||||
m.Bus.Emit(string(ev.Name), ev)
|
||||
}
|
||||
return m, err
|
||||
}
|
||||
|
||||
// Response binds a set of listeners to an event type. The listeners are called for each result from
|
||||
// every plugin when Publish is called.
|
||||
func (m *Manager) Response(event EventType, listener ...func(p *Plugin, r *EventResponse)) *Manager {
|
||||
ev, _ := NewEvent(event, nil)
|
||||
for _, l := range listener {
|
||||
m.Bus.On(string(ev.ResponseEventName("results")), l)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *Manager) propagateEvent(p Plugin) func(e *Event) {
|
||||
return func(e *Event) {
|
||||
resp, err := p.Run(*e)
|
||||
r := &resp
|
||||
if err != nil && !resp.Errored() {
|
||||
resp.Error = err.Error()
|
||||
}
|
||||
m.Bus.Emit(string(e.ResponseEventName("results")), &p, r)
|
||||
}
|
||||
}
|
||||
|
||||
// Subscribe subscribes the plugin to the events in the given bus
|
||||
func (m *Manager) Subscribe(b *Bus) *Manager {
|
||||
func (m *Manager) Subscribe(b *emission.Emitter) *Manager {
|
||||
for _, p := range m.Plugins {
|
||||
for _, e := range m.Events {
|
||||
b.Listen(e, b.propagateEvent(p))
|
||||
b.On(string(e), m.propagateEvent(p))
|
||||
}
|
||||
}
|
||||
return m
|
||||
@@ -74,13 +97,6 @@ func relativeToCwd(p string) (string, error) {
|
||||
return filepath.Join(cwd, p), nil
|
||||
}
|
||||
|
||||
// ListenAll Binds a callback to all plugins event
|
||||
func (m *Manager) ListenAll(event EventType, listener interface{}) {
|
||||
for _, p := range m.Plugins {
|
||||
m.Bus.Listen(EventType(p.Name), listener)
|
||||
}
|
||||
}
|
||||
|
||||
// Autoload automatically loads plugins binaries prefixed by 'prefix' in the current path
|
||||
// optionally takes a list of paths to look also into
|
||||
func (m *Manager) Autoload(prefix string, extensionpath ...string) *Manager {
|
||||
|
31
vendor/github.com/mudler/go-pluggable/plugin.go
generated
vendored
@@ -16,7 +16,9 @@
|
||||
package pluggable
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
|
||||
@@ -29,19 +31,42 @@ type Plugin struct {
|
||||
Executable string
|
||||
}
|
||||
|
||||
// A safe threshold to avoid unpleasant exec buffer fill for argv too big. Seems 128K is the limit on Linux.
|
||||
const maxMessageSize = 1 << 13
|
||||
|
||||
// Run runs the Event on the plugin, and returns an EventResponse
|
||||
func (p Plugin) Run(e Event) (EventResponse, error) {
|
||||
r := EventResponse{}
|
||||
k, err := e.JSON()
|
||||
|
||||
eventToprocess := &e
|
||||
|
||||
if len(e.Data) > maxMessageSize {
|
||||
copy := e.Copy()
|
||||
copy.Data = ""
|
||||
f, err := ioutil.TempFile(os.TempDir(), "pluggable")
|
||||
if err != nil {
|
||||
return r, errors.Wrap(err, "while creating temporary file")
|
||||
}
|
||||
if err := ioutil.WriteFile(f.Name(), []byte(e.Data), os.ModePerm); err != nil {
|
||||
return r, errors.Wrap(err, "while writing to temporary file")
|
||||
}
|
||||
copy.File = f.Name()
|
||||
eventToprocess = copy
|
||||
defer os.RemoveAll(f.Name())
|
||||
}
|
||||
|
||||
k, err := eventToprocess.JSON()
|
||||
if err != nil {
|
||||
return r, errors.Wrap(err, "while marshalling event")
|
||||
}
|
||||
cmd := exec.Command(p.Executable, string(e.Name), k)
|
||||
cmd.Env = os.Environ()
|
||||
var b bytes.Buffer
|
||||
cmd.Stderr = &b
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
r.Error = err.Error()
|
||||
return r, errors.Wrap(err, "while executing plugin")
|
||||
r.Error = "error while executing plugin: " + err.Error() + string(b.String())
|
||||
return r, errors.Wrap(err, "while executing plugin: "+string(b.String()))
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(out, &r); err != nil {
|
||||
|
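The plugin.go hunk above adds a size guard: when the serialized event data exceeds `maxMessageSize` (1 << 13, i.e. 8K, well under the ~128K argv limit mentioned in the comment), the payload is written to a temporary file and the event is forwarded with `Data` cleared and `File` pointing at that file. A plugin that wants to handle large payloads therefore has to fall back to reading `File`. The sketch below is an assumption based on that hunk and on the `File` field added in events.go, not code from this changeset:

```go
// Sketch: plugin-side handling of events whose payload was offloaded to a
// temporary file (Data cleared, File set) by the host, per the hunk above.
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
)

type event struct {
	Name string `json:"name"`
	Data string `json:"data"`
	File string `json:"file"`
}

func main() {
	var ev event
	if err := json.Unmarshal([]byte(os.Args[2]), &ev); err != nil {
		fmt.Printf(`{"state":"","data":"","error":%q}`, err.Error())
		return
	}

	payload := ev.Data
	if payload == "" && ev.File != "" {
		// Large payloads arrive via a temporary file instead of argv.
		b, err := ioutil.ReadFile(ev.File)
		if err != nil {
			fmt.Printf(`{"state":"","data":"","error":%q}`, err.Error())
			return
		}
		payload = string(b)
	}

	fmt.Printf(`{"state":"ok","data":%q,"error":""}`, payload)
}
```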
5
vendor/modules.txt
vendored
@@ -62,8 +62,6 @@ github.com/cavaliercoder/grab/bps
|
||||
github.com/cespare/xxhash/v2
|
||||
# github.com/chuckpreslar/emission v0.0.0-20170206194824-a7ddd980baf9
|
||||
github.com/chuckpreslar/emission
|
||||
# github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0
|
||||
github.com/codegangsta/inject
|
||||
# github.com/containerd/cgroups v0.0.0-20200217135630-d732e370d46d
|
||||
github.com/containerd/cgroups/stats/v1
|
||||
# github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc => github.com/containerd/containerd v1.3.1-0.20200227195959-4d242818bf55
|
||||
@@ -209,7 +207,6 @@ github.com/fatih/color
|
||||
# github.com/fsnotify/fsnotify v1.4.9
|
||||
github.com/fsnotify/fsnotify
|
||||
# github.com/fsouza/go-dockerclient v1.6.4
|
||||
## explicit
|
||||
github.com/fsouza/go-dockerclient
|
||||
# github.com/genuinetools/img v0.5.11
|
||||
## explicit
|
||||
@@ -478,7 +475,7 @@ github.com/mudler/cobra-extensions
|
||||
# github.com/mudler/docker-companion v0.4.6-0.20200418093252-41846f112d87
|
||||
## explicit
|
||||
github.com/mudler/docker-companion/api
|
||||
# github.com/mudler/go-pluggable v0.0.0-20201113184918-d36448fc8f82
|
||||
# github.com/mudler/go-pluggable v0.0.0-20210513155700-54c6443073af
|
||||
## explicit
|
||||
github.com/mudler/go-pluggable
|
||||
# github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290
|
||||
|