mirror of https://github.com/rancher/rke.git synced 2025-04-27 11:21:08 +00:00

Update vendor

Daishan 2020-02-13 15:47:04 -07:00
parent 27c157124d
commit 8d05697c5e
41 changed files with 2246 additions and 13570 deletions

go.mod (2 changed lines)

@ -20,6 +20,7 @@ require (
github.com/docker/docker v0.7.3-0.20190808172531-150530564a14
github.com/docker/go-connections v0.3.0
github.com/ghodss/yaml v1.0.0
github.com/go-bindata/go-bindata v3.1.2+incompatible
github.com/go-ini/ini v1.37.0
github.com/gorilla/mux v1.7.3 // indirect
github.com/mattn/go-colorable v0.1.0
@ -27,7 +28,6 @@ require (
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/pkg/errors v0.8.1
github.com/rancher/kontainer-driver-metadata v0.0.0-20200218183853-2f58a2f30054
github.com/rancher/norman v0.0.0-20200211155126-fc45a55d4dfd
github.com/rancher/types v0.0.0-20200218191331-dc762fc27c91
github.com/sirupsen/logrus v1.4.2

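The go.mod change pulls in github.com/go-bindata/go-bindata as a direct requirement, which is why its sources appear under vendor/ later in this commit. A common way to keep a code-generation tool like this in go.mod (so that `go mod vendor` copies it) is a build-tag-guarded tools file; the diff does not show whether rke uses exactly this mechanism, so the file below is only an illustrative sketch.

```go
// tools.go (hypothetical): pins go-bindata in go.mod without linking it into
// normal builds. The "tools" build tag is never satisfied by a regular build.

// +build tools

package tools

import (
	// Blank import keeps github.com/go-bindata/go-bindata listed in go.mod,
	// so `go mod vendor` also copies it into vendor/ as seen in this commit.
	_ "github.com/go-bindata/go-bindata"
)
```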
go.sum (59 changed lines)

@ -33,10 +33,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
@ -58,9 +56,7 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v0.0.0-20180507225419-00862f899353/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
@ -103,7 +99,6 @@ github.com/coreos/go-semver v0.0.0-20180108230905-e214231b295a/go.mod h1:nnelYz7
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5tgDm3YN7+9dYrpK96E5wFilTFWIDZOM=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@ -111,7 +106,6 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/prometheus-operator v0.33.0 h1:mDblqA4KG+KBxOYt4mJE2coJhiHv+ScgnOvuchmgTYM=
github.com/coreos/prometheus-operator v0.33.0/go.mod h1:Bk/PShB4YYCX7sIRLuFv2/d8WA8P2gpj8RahsGGPfy0=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@ -131,7 +125,6 @@ github.com/docker/docker v0.7.3-0.20190808172531-150530564a14 h1:ZkcBCvSGEg1Er3X
github.com/docker/docker v0.7.3-0.20190808172531-150530564a14/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@ -146,7 +139,6 @@ github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.6.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@ -168,6 +160,8 @@ github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-bindata/go-bindata v3.1.2+incompatible h1:5vjJMVhowQdPzjE1LdxyFF7YFTXg5IgGVW4gBr5IbvE=
github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
github.com/go-ini/ini v1.21.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ini/ini v1.37.0 h1:/FpMfveJbc7ExTTDgT5nL9Vw+aZdst/c2dOxC931U+M=
github.com/go-ini/ini v1.37.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
@ -191,17 +185,13 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
@ -217,9 +207,7 @@ github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nA
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE=
github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
@ -229,9 +217,7 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/validate v0.17.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
@ -255,7 +241,6 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -268,7 +253,6 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
@ -306,7 +290,6 @@ github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@ -315,7 +298,6 @@ github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20181025070259-68e3a13e4117/
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@ -361,9 +343,7 @@ github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@ -397,14 +377,11 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/maruel/panicparse v0.0.0-20171209025017-c0182c169410/go.mod h1:nty42YY5QByNC5MM7q/nj938VbgPU7avs45z6NClpxI=
github.com/maruel/ut v1.0.0/go.mod h1:I68ffiAt5qre9obEVTy7S2/fj2dJku2NYLvzPuY0gqE=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd h1:HvFwW+cm9bCbZ/+vuGNq7CRWXql8c0y8nGeYpqmpvmk=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.0 h1:v2XXALHHh6zHfYTJ+cSkwtyffnaOyR1MXaA91mTrb8o=
@ -495,16 +472,12 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0 h1:YVIb/fVcOTMSqtqZWSKnHpSLBxu8DKgxq8z6RuBZwqI=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -512,37 +485,24 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/prometheus v2.9.2+incompatible/go.mod h1:vdLuLLM0uqhLSofrQ7Nev2b/rQUyZ+pkT3zF7LB/i1g=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.8.0/go.mod h1:fSI0j+IUQrDd7+ZtR9WKIGtoYAYAJUKcKhYLG25tN4g=
github.com/rancher/kontainer-driver-metadata v0.0.0-20200218183853-2f58a2f30054 h1:yvMQKfMyjGWaPmD8dhYk0wZTsjxCZbNPXXa7pziRns0=
github.com/rancher/kontainer-driver-metadata v0.0.0-20200218183853-2f58a2f30054/go.mod h1:MkY6hsQLw3cHrb0V/Fmn4ur2c5PBIpLIe+gvMWoTXGI=
github.com/rancher/norman v0.0.0-20200123223841-6d86f4e37a69 h1:0Vs/m/ulb6kZiLkyOiw2yrLyBWirG9KHQYLbJ0rXor8=
github.com/rancher/norman v0.0.0-20200123223841-6d86f4e37a69/go.mod h1:XMiRH4NH6ELMELnQOoIef/g7UAtCzTxHaYRk0kMlwwQ=
github.com/rancher/norman v0.0.0-20200211155126-fc45a55d4dfd h1:96iahn2n4qq7EuJ6QNrW2iqm1xVUktzjHSyNPPm/6YU=
github.com/rancher/norman v0.0.0-20200211155126-fc45a55d4dfd/go.mod h1:b+483H276jRBXYosdWrNKFpxH+JYMs3UIdlV60dhdg0=
github.com/rancher/pkg v0.0.0-20190514055449-b30ab9de040e h1:j6+HqCET/NLPBtew2m5apL7jWw/PStQ7iGwXjgAqdvo=
github.com/rancher/pkg v0.0.0-20190514055449-b30ab9de040e/go.mod h1:XbYHTPaXuw8ZY9bylhYKQh/nJxDaTKk3YhAxPl4Qy/k=
github.com/rancher/types v0.0.0-20200123224322-9adcafc483ee/go.mod h1:rN9IgDRV1O4HTJCLtpLqyUEfdPsoeBoH2wNUm8PPThk=
github.com/rancher/types v0.0.0-20200218191331-dc762fc27c91 h1:IitaXkx6eqd4nCQMzJkGriaNWk/kDKfO5CasK3AXhp4=
github.com/rancher/types v0.0.0-20200218191331-dc762fc27c91/go.mod h1:7v/f4VGMfBQi5yQQxlFHylW6QP4w6vCxd6KKPG5sZpM=
github.com/rancher/wrangler v0.4.0 h1:iLvuJcZkd38E3RGG74dFMMNEju0PeTzfT1PQiv5okVU=
github.com/rancher/wrangler v0.4.0/go.mod h1:1cR91WLhZgkZ+U4fV9nVuXqKurWbgXcIReU4wnQvTN8=
github.com/rancher/wrangler v0.4.1 h1:kQLE6syPbX4ciwU4OF+EHIPT9zj6r6pyN83u9Gsv5eg=
github.com/rancher/wrangler v0.4.1/go.mod h1:1cR91WLhZgkZ+U4fV9nVuXqKurWbgXcIReU4wnQvTN8=
github.com/rancher/wrangler v0.4.2-0.20200214231136-099089b8a398 h1:1C/Fp1aSEL4Pl4hzlQsdaqzcCB9qMgalbVkvzzPIrPk=
github.com/rancher/wrangler v0.4.2-0.20200214231136-099089b8a398/go.mod h1:1cR91WLhZgkZ+U4fV9nVuXqKurWbgXcIReU4wnQvTN8=
@ -585,7 +545,6 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@ -595,7 +554,6 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@ -621,15 +579,12 @@ go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569 h1:nSQar3Y0E3VQF/VdZ8PTAilaXpER+d7ypdABCrpwMdg=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df h1:shvkWr0NAZkg4nPuE3XrKP0VuBPijjk3TfX6Y6acFNg=
go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15 h1:Z2sc4+v0JHV6Mn4kX1f2a5nruNjmV+Th32sugE8zwz8=
go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@ -643,7 +598,6 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@ -690,7 +644,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -715,9 +668,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -752,7 +703,6 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c h1:IGkKhmfzcztjm6gYkykvu/NiS8kaqbCWAEWWAyf8J5U=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
@ -783,7 +733,6 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -811,7 +760,6 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.1.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -849,9 +797,7 @@ k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1
k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs=
k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM=
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20191120174120-e74f70b9b27e h1:HqlU9dKk5YVs7R84jmq6U3Wo/XslpkxHpBv2iWHLtLc=
k8s.io/gengo v0.0.0-20191120174120-e74f70b9b27e/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.1.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
@ -859,7 +805,6 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-aggregator v0.17.2 h1:3E94T8cVy3Zsh75wffsyuk04CiQ8gLzsjlaFwb1wHRA=
k8s.io/kube-aggregator v0.17.2/go.mod h1:8xQTzaH0GrcKPiSB4YYWwWbeQ0j/4zRsbQt8usEMbRg=
k8s.io/kube-openapi v0.0.0-20180629012420-d83b052f768a/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=

vendor/github.com/go-bindata/go-bindata/.gitignore (generated vendored file, 16 lines added)

@ -0,0 +1,16 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Goland project files
.idea/
*.iml


@ -0,0 +1,79 @@
## Contribution guidelines.
So you wish to contribute to this project? Fantastic!
Here are a few guidelines to help you do this in a
streamlined fashion.
## Bug reports
When supplying a bug report, please consider the following guidelines.
These serve to make it easier for us to address the issue and find a solution.
Most of these are pretty self-evident, but sometimes it is still necessary
to reiterate them.
* Be clear in the way you express the problem. Use simple language and
just enough of it to clearly define the issue. Not everyone is a native
English speaker. And while most can handle themselves pretty well,
it helps to stay away from more esoteric vocabulary.
Be patient with non-native English speakers. If their bug reports
or comments are hard to understand, just ask for clarification.
Do not start guessing at their meaning, as this may just lead to
more confusion and misunderstandings.
* Clearly define any information which is relevant to the problem.
This includes library versions, operating system and any other
external dependencies which may be needed.
* Where applicable, provide a step-by-step listing of the way to
reproduce the problem. Make sure this is the simplest possible
way to do so. Omit any and all unnecessary steps, because they may
just complicate our understanding of the real problem.
If need be, create a whole new code project on your local machine,
which specifically tries to create the problem you are running into;
nothing more, nothing less.
Include this program in the bug report. It often suffices to paste
the code in a [Gist](https://gist.github.com) or on the
[Go playground](http://play.golang.org).
* If possible, provide us with a listing of the steps you have already
undertaken to solve the problem. This can save us a great deal of
wasted time, trying out solutions you have already covered.
## Pull requests
Bug reports are great. Supplying fixes to bugs is even better.
When submitting a pull request, the following guidelines are
good to keep in mind:
* `go fmt`: **Always** run your code through `go fmt` before
committing it. Code has to be readable by many different
people, and the only way this will be as painless as possible
is if we all stick to the same code style.
Some of our projects may have automated build-servers hooked up
to commit hooks. These will vet any submitted code and determine
if it meets a set of properties. One of which is code formatting.
These servers will outright deny a submission which has not been
run through `go fmt`, even if the code itself is correct.
We try to maintain a zero-tolerance policy on this matter,
because consistently formatted code makes life a great deal
easier for everyone involved.
* Commit log messages: When committing changes, do so often and
clearly, even if you have changed only one character in a code
comment. This means that commit log messages should clearly state
exactly what the change does and why. If it fixes a known issue,
then mention the issue number in the commit log. E.g.:
> Fixes return value for `foo/boo.Baz()` to be consistent with
> the rest of the API. This addresses issue #32
Do not pile a lot of unrelated changes into a single commit.
Pick and choose only those changes for a single commit, which are
directly related. We would much rather see a hundred commits
saying nothing but `"Runs go fmt"` in between any real fixes
than have these style changes embedded in those real fixes.
It creates a lot of noise when trying to review code.

vendor/github.com/go-bindata/go-bindata/LICENSE (generated vendored file, 3 lines added)

@ -0,0 +1,3 @@
This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
license. Its contents can be found at:
http://creativecommons.org/publicdomain/zero/1.0

vendor/github.com/go-bindata/go-bindata/Makefile (generated vendored file, 2 lines added)

@ -0,0 +1,2 @@
all:
make -C testdata

vendor/github.com/go-bindata/go-bindata/README.md (generated vendored file, 199 lines added)

@ -0,0 +1,199 @@
## bindata
[![Go Report Card](https://goreportcard.com/badge/github.com/go-bindata/bindata)](https://goreportcard.com/report/github.com/go-bindata/bindata)
This package converts any file into manageable Go source code. Useful for
embedding binary data into a Go program. The file data is optionally gzip
compressed before being converted to a raw byte slice.
It comes with a command-line tool in the `go-bindata` subdirectory.
This tool offers a set of command-line options used to customize the
output being generated.
### Installation
To install the library and command line program, use the following:
go get -u github.com/go-bindata/go-bindata/...
### Usage
Conversion is done on one or more sets of files. They are all embedded in a new
Go source file, along with a table of contents and an `Asset` function,
which allows quick access to the asset, based on its name.
The simplest invocation generates a `bindata.go` file in the current
working directory. It includes all assets from the `data` directory.
$ go-bindata data/
To include all input subdirectories recursively, use the ellipsis postfix
as defined for Go import paths. Otherwise only assets in the
input directory itself are considered.
$ go-bindata data/...
To specify the name of the output file being generated, we use the following:
$ go-bindata -o myfile.go data/
Multiple input directories can be specified if necessary.
$ go-bindata dir1/... /path/to/dir2/... dir3
The following paragraphs detail some of the command line options which can be
supplied to `go-bindata`. Refer to the `testdata/out` directory for various
output examples from the assets in `testdata/in`. Each example uses different
command line options.
To ignore files, pass in regexes using -ignore, for example:
$ go-bindata -ignore=\\.gitignore data/...
### Accessing an asset
To access asset data, we use the `Asset(string) ([]byte, error)` function which
is included in the generated output.
data, err := Asset("pub/style/foo.css")
if err != nil {
// Asset was not found.
}
// use asset data
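Beyond `Asset`, the generated file also exposes helpers such as `MustAsset` and `AssetNames`. A minimal sketch of using them, assuming the generated `bindata.go` lives in the same package and the asset name is made up:

```go
package main

import (
	"fmt"
	"log"
)

func main() {
	// List every embedded asset key (keys match the prefix-stripped paths).
	for _, name := range AssetNames() {
		fmt.Println(name)
	}

	// Asset returns an error for unknown names...
	data, err := Asset("pub/style/foo.css")
	if err != nil {
		log.Fatalf("asset not found: %v", err)
	}
	fmt.Printf("foo.css is %d bytes\n", len(data))

	// ...while MustAsset panics instead, which can be handy in init paths.
	_ = MustAsset("pub/style/foo.css")
}
```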
### Debug vs Release builds
When invoking the program with the `-debug` flag, the generated code does
not actually include the asset data. Instead, it generates function stubs
which load the data from the original file on disk. The asset API remains
identical between debug and release builds, so your code will not have to
change.
This is useful during development when you expect the assets to change often.
The host application using these assets uses the same API in both cases and
will not have to care where the actual data comes from.
An example is a Go webserver with some embedded, static web content like
HTML, JS and CSS files. While developing it, you do not want to rebuild the
whole server and restart it every time you make a change to a bit of
JavaScript. You just want to build and launch the server once. Then just press
refresh in the browser to see those changes. Embedding the assets with the
`debug` flag allows you to do just that. When you are finished developing and
ready for deployment, just re-invoke `go-bindata` without the `-debug` flag.
It will now embed the latest version of the assets.
### Lower memory footprint
Using the `-nomemcopy` flag will alter the way the output file is generated.
It will employ a hack that allows us to read the file data directly from
the compiled program's `.rodata` section. This ensures that when we call
our generated function, we omit unnecessary memcopies.
The downside of this, is that it requires dependencies on the `reflect` and
`unsafe` packages. These may be restricted on platforms like AppEngine and
thus prevent you from using this mode.
Another disadvantage is that the byte slice we create is strictly read-only.
For most use cases this is not a problem, but if you ever try to alter the
returned byte slice, a runtime panic is thrown. Use this mode only on target
platforms where memory constraints are an issue.
The default behaviour is to use the old code generation method. This
prevents the two previously mentioned issues, but will employ at least one
extra memcopy and thus increase memory requirements.
For instance, consider the following two examples:
This would be the default mode, using an extra memcopy but gives a safe
implementation without dependencies on `reflect` and `unsafe`:
```go
func myfile() []byte {
return []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a}
}
```
Here is the same functionality, but using the `.rodata` hack.
The byte slice returned from this example cannot be written to without
generating a runtime error.
```go
var _myfile = "\x89\x50\x4e\x47\x0d\x0a\x1a"
func myfile() []byte {
var empty [0]byte
sx := (*reflect.StringHeader)(unsafe.Pointer(&_myfile))
b := empty[:]
bx := (*reflect.SliceHeader)(unsafe.Pointer(&b))
bx.Data = sx.Data
bx.Len = len(_myfile)
bx.Cap = bx.Len
return b
}
```
### Optional compression
When the `-nocompress` flag is given, the supplied resource is *not* GZIP
compressed before being turned into Go code. The data should still be accessed
through a function call, so nothing changes in the usage of the generated file.
This feature is useful if you do not care for compression, or the supplied
resource is already compressed. Doing it again would not add any value and may
even increase the size of the data.
The default behaviour of the program is to use compression.
### Path prefix stripping
The keys used in the `_bindata` map are the same as the input file name
passed to `go-bindata`. This includes the path. In most cases, this is not
desirable, as it puts potentially sensitive information in your code base.
For this purpose, the tool supplies another command-line flag, `-prefix`.
This accepts a portion of a path name, which should be stripped off from
the map keys and function names.
For example, running without the `-prefix` flag, we get:
$ go-bindata /path/to/templates/
_bindata["/path/to/templates/foo.html"] = path_to_templates_foo_html
Running with the `-prefix` flag, we get:
$ go-bindata -prefix "/path/to/" /path/to/templates/
_bindata["templates/foo.html"] = templates_foo_html
### Build tags
With the optional `-tags` flag, you can specify any go build tags that
must be fulfilled for the output file to be included in a build. This
is useful when including binary data in multiple formats, where the desired
format is specified at build time with the appropriate tags.
The tags are appended to a `// +build` line in the beginning of the output file
and must follow the build tags syntax specified by the go tool.
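Putting this together, an invocation such as `go-bindata -tags dev -pkg assets data/` would (based on the header written by Translate in convert.go later in this diff) produce a file that starts roughly like this; the package name and asset path are invented:

```go
// Code generated for package assets by go-bindata DO NOT EDIT. (@generated)
// sources:
// data/index.html
// +build dev

package assets
```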
### Serve assets with `net/http`
With the `-fs` flag, `go-bindata` will add an `AssetFile()` method returning an `http.FileSystem` interface:
$ go-bindata -fs -prefix "static/" static/
Use the `-prefix` flag to strip the first-level directory; then, in your `net/http` router, you can use `AssetFile()` with `http.FileServer()` like this:
```go
mux := http.NewServeMux()
mux.Handle("/static", http.FileServer(AssetFile()))
http.ListenAndServe(":8080", mux)
```

vendor/github.com/go-bindata/go-bindata/_config.yml (generated vendored file, 1 line added)

@ -0,0 +1 @@
theme: jekyll-theme-cayman

vendor/github.com/go-bindata/go-bindata/asset.go (generated vendored file, 12 lines added)

@ -0,0 +1,12 @@
// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0/
package bindata
// Asset holds information about a single asset to be processed.
type Asset struct {
Path string // Full file path.
Name string // Key used in TOC -- name by which asset is referenced.
Func string // Function name for the procedure returning the asset contents.
}
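As a concrete illustration of how these fields end up populated (paths are hypothetical): an input file `data/css/site.css` processed with `-prefix data/` would be recorded roughly as follows, with `Func` derived by `safeFunctionName` in convert.go later in this diff.

```go
package bindata

// exampleAsset shows hypothetical values for one processed input file.
var exampleAsset = Asset{
	Path: "/home/user/project/data/css/site.css", // absolute path on disk
	Name: "css/site.css",                         // TOC key after prefix stripping
	Func: "cssSiteCss",                           // accessor function emitted into bindata.go
}
```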

vendor/github.com/go-bindata/go-bindata/bytewriter.go (generated vendored file, 44 lines added)

@ -0,0 +1,44 @@
// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0/
package bindata
import (
"fmt"
"io"
)
var (
newline = []byte{'\n'}
dataindent = []byte{'\t', '\t'}
space = []byte{' '}
)
type ByteWriter struct {
io.Writer
c int
}
func (w *ByteWriter) Write(p []byte) (n int, err error) {
if len(p) == 0 {
return
}
for n = range p {
if w.c%12 == 0 {
w.Writer.Write(newline)
w.Writer.Write(dataindent)
w.c = 0
} else {
w.Writer.Write(space)
}
fmt.Fprintf(w.Writer, "0x%02x,", p[n])
w.c++
}
n++
return
}
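A small usage sketch, assuming the vendored import path above: ByteWriter wraps any io.Writer and renders each byte as a `0x%02x,` token, twelve per line, which is the layout of the embedded data in a non-debug bindata.go.

```go
package main

import (
	"bytes"
	"fmt"

	bindata "github.com/go-bindata/go-bindata"
)

func main() {
	var buf bytes.Buffer
	w := &bindata.ByteWriter{Writer: &buf}

	// 16 bytes of sample data; the writer emits them 12 per line.
	w.Write([]byte("0123456789abcdef"))

	fmt.Println(buf.String())
}
```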

vendor/github.com/go-bindata/go-bindata/config.go (generated vendored file, 208 lines added)

@ -0,0 +1,208 @@
// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0/
package bindata
import (
"fmt"
"os"
"path/filepath"
"regexp"
)
// InputConfig defines options for an asset directory to be converted.
type InputConfig struct {
// Path defines a directory containing asset files to be included
// in the generated output.
Path string
// Recursive defines whether subdirectories of Path
// should be recursively included in the conversion.
Recursive bool
}
// Config defines a set of options for the asset conversion.
type Config struct {
// Name of the package to use. Defaults to 'main'.
Package string
// Tags specify a set of optional build tags, which should be
// included in the generated output. The tags are appended to a
// `// +build` line in the beginning of the output file
// and must follow the build tags syntax specified by the go tool.
Tags string
// Input defines the directory path, containing all asset files as
// well as whether to recursively process assets in any sub directories.
Input []InputConfig
// Output defines the output file for the generated code.
// If left empty, this defaults to 'bindata.go' in the current
// working directory.
Output string
// Prefix defines a path prefix which should be stripped from all
// file names when generating the keys in the table of contents.
// For example, running without the `-prefix` flag, we get:
//
// $ go-bindata /path/to/templates
// go_bindata["/path/to/templates/foo.html"] = _path_to_templates_foo_html
//
// Running with the `-prefix` flag, we get:
//
// $ go-bindata -prefix "/path/to/" /path/to/templates/foo.html
// go_bindata["templates/foo.html"] = templates_foo_html
Prefix string
// NoMemCopy will alter the way the output file is generated.
//
// It will employ a hack that allows us to read the file data directly from
// the compiled program's `.rodata` section. This ensures that when we call
// our generated function, we omit unnecessary mem copies.
//
// The downside of this, is that it requires dependencies on the `reflect` and
// `unsafe` packages. These may be restricted on platforms like AppEngine and
// thus prevent you from using this mode.
//
// Another disadvantage is that the byte slice we create is strictly read-only.
// For most use-cases this is not a problem, but if you ever try to alter the
// returned byte slice, a runtime panic is thrown. Use this mode only on target
// platforms where memory constraints are an issue.
//
// The default behaviour is to use the old code generation method. This
// prevents the two previously mentioned issues, but will employ at least one
// extra memcopy and thus increase memory requirements.
//
// For instance, consider the following two examples:
//
// This would be the default mode, using an extra memcopy but gives a safe
// implementation without dependencies on `reflect` and `unsafe`:
//
// func myfile() []byte {
// return []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a}
// }
//
// Here is the same functionality, but uses the `.rodata` hack.
// The byte slice returned from this example can not be written to without
// generating a runtime error.
//
// var _myfile = "\x89\x50\x4e\x47\x0d\x0a\x1a"
//
// func myfile() []byte {
// var empty [0]byte
// sx := (*reflect.StringHeader)(unsafe.Pointer(&_myfile))
// b := empty[:]
// bx := (*reflect.SliceHeader)(unsafe.Pointer(&b))
// bx.Data = sx.Data
// bx.Len = len(_myfile)
// bx.Cap = bx.Len
// return b
// }
NoMemCopy bool
// NoCompress means the assets are /not/ GZIP compressed before being turned
// into Go code. The generated function will automatically unzip
// the file data when called. Defaults to false.
NoCompress bool
// HttpFileSystem controls whether the generated code also exposes the assets
// through a function returning an http.FileSystem (the -fs flag). Defaults to false.
HttpFileSystem bool
// Perform a debug build. This generates an asset file, which
// loads the asset contents directly from disk at their original
// location, instead of embedding the contents in the code.
//
// This is mostly useful if you anticipate that the assets are
// going to change during your development cycle. You will always
// want your code to access the latest version of the asset.
// Only in release mode, will the assets actually be embedded
// in the code. The default behaviour is Release mode.
Debug bool
// Perform a dev build, which is nearly identical to the debug option. The
// only difference is that instead of absolute file paths in generated code,
// it expects a variable, `rootDir`, to be set in the generated code's
// package (the author needs to do this manually), which it then prepends to
// an asset's name to construct the file path on disk.
//
// This is mainly so you can push the generated code file to a shared
// repository.
Dev bool
// When true, size, mode and modtime are not preserved from files
NoMetadata bool
// When nonzero, use this as mode for all files.
Mode uint
// When nonzero, use this as unix timestamp for all files.
ModTime int64
// Ignores any filenames matching the regex pattern specified, e.g.
// path/to/file.ext will ignore only that file, or \\.gitignore
// will match any .gitignore file.
//
// This parameter can be provided multiple times.
Ignore []*regexp.Regexp
}
// NewConfig returns a default configuration struct.
func NewConfig() *Config {
c := new(Config)
c.Package = "main"
c.NoMemCopy = false
c.NoCompress = false
c.HttpFileSystem = false
c.Debug = false
c.Output = "./bindata.go"
c.Ignore = make([]*regexp.Regexp, 0)
return c
}
// validate ensures the config has sane values.
// Part of which means checking if certain file/directory paths exist.
func (c *Config) validate() error {
if len(c.Package) == 0 {
return fmt.Errorf("Missing package name")
}
for _, input := range c.Input {
_, err := os.Lstat(input.Path)
if err != nil {
return fmt.Errorf("Failed to stat input path '%s': %v", input.Path, err)
}
}
if len(c.Output) == 0 {
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("Unable to determine current working directory.")
}
c.Output = filepath.Join(cwd, "bindata.go")
}
stat, err := os.Lstat(c.Output)
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("Output path: %v", err)
}
// File does not exist. This is fine, just make
// sure the directory it is to be in exists.
dir, _ := filepath.Split(c.Output)
if dir != "" {
err = os.MkdirAll(dir, 0744)
if err != nil {
return fmt.Errorf("Create output directory: %v", err)
}
}
}
if stat != nil && stat.IsDir() {
return fmt.Errorf("Output path is a directory.")
}
return nil
}
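A minimal programmatic sketch of driving the library through Config and Translate (defined in convert.go below); the package name, paths and ignore pattern are all hypothetical:

```go
package main

import (
	"log"
	"regexp"

	bindata "github.com/go-bindata/go-bindata"
)

func main() {
	c := bindata.NewConfig()
	c.Package = "templates"           // package clause of the generated file
	c.Output = "templates/bindata.go" // where the generated code is written
	c.Prefix = "data/"                // stripped from asset names in the TOC
	c.Ignore = append(c.Ignore, regexp.MustCompile(`\.gitignore$`))
	c.Input = []bindata.InputConfig{
		{Path: "data", Recursive: true}, // equivalent to `data/...` on the CLI
	}

	if err := bindata.Translate(c); err != nil {
		log.Fatal(err)
	}
}
```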

vendor/github.com/go-bindata/go-bindata/convert.go (generated vendored file, 261 lines added)

@ -0,0 +1,261 @@
// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0/
package bindata
import (
"bufio"
"fmt"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"unicode"
)
// Translate reads assets from an input directory, converts them
// to Go code and writes new files to the output specified
// in the given configuration.
func Translate(c *Config) error {
var toc []Asset
// Ensure our configuration has sane values.
err := c.validate()
if err != nil {
return err
}
var knownFuncs = make(map[string]int)
var visitedPaths = make(map[string]bool)
// Locate all the assets.
for _, input := range c.Input {
err = findFiles(input.Path, c.Prefix, input.Recursive, &toc, c.Ignore, knownFuncs, visitedPaths)
if err != nil {
return err
}
}
// Create output file.
fd, err := os.Create(c.Output)
if err != nil {
return err
}
defer fd.Close()
// Create a buffered writer for better performance.
bfd := bufio.NewWriter(fd)
defer bfd.Flush()
// Write the header. This makes e.g. GitHub ignore diffs in generated files.
if _, err = fmt.Fprintf(bfd, "// Code generated for package %s by go-bindata DO NOT EDIT. (@generated)\n", c.Package); err != nil {
return err
}
if _, err = fmt.Fprint(bfd, "// sources:\n"); err != nil {
return err
}
wd, err := os.Getwd()
if err != nil {
return err
}
for _, asset := range toc {
relative, _ := filepath.Rel(wd, asset.Path)
if _, err = fmt.Fprintf(bfd, "// %s\n", filepath.ToSlash(relative)); err != nil {
return err
}
}
//if _, err = fmt.Fprint(bfd, "// DO NOT EDIT!\n\n"); err != nil {
// return err
//}
// Write build tags, if applicable.
if len(c.Tags) > 0 {
if _, err = fmt.Fprintf(bfd, "// +build %s\n\n", c.Tags); err != nil {
return err
}
}
// Write package declaration.
_, err = fmt.Fprintf(bfd, "package %s\n\n", c.Package)
if err != nil {
return err
}
// Write assets.
if c.Debug || c.Dev {
err = writeDebug(bfd, c, toc)
} else {
err = writeRelease(bfd, c, toc)
}
if err != nil {
return err
}
// Write table of contents
if err := writeTOC(bfd, toc); err != nil {
return err
}
// Write hierarchical tree of assets
if err := writeTOCTree(bfd, toc); err != nil {
return err
}
// Write restore procedure
return writeRestore(bfd)
}
// ByName implements sort.Interface for []os.FileInfo based on Name()
type ByName []os.FileInfo
func (v ByName) Len() int { return len(v) }
func (v ByName) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
func (v ByName) Less(i, j int) bool { return v[i].Name() < v[j].Name() }
// findFiles recursively finds all the file paths in the given directory tree.
// They are appended to the given toc slice, along with a safe function name
// for each file, which will be used when generating the output code.
func findFiles(dir, prefix string, recursive bool, toc *[]Asset, ignore []*regexp.Regexp, knownFuncs map[string]int, visitedPaths map[string]bool) error {
dirpath := dir
if len(prefix) > 0 {
dirpath, _ = filepath.Abs(dirpath)
prefix, _ = filepath.Abs(prefix)
prefix = filepath.ToSlash(prefix)
}
fi, err := os.Stat(dirpath)
if err != nil {
return err
}
var list []os.FileInfo
if !fi.IsDir() {
dirpath = filepath.Dir(dirpath)
list = []os.FileInfo{fi}
} else {
visitedPaths[dirpath] = true
fd, err := os.Open(dirpath)
if err != nil {
return err
}
defer fd.Close()
list, err = fd.Readdir(0)
if err != nil {
return err
}
// Sort to make output stable between invocations
sort.Sort(ByName(list))
}
for _, file := range list {
var asset Asset
asset.Path = filepath.Join(dirpath, file.Name())
asset.Name = filepath.ToSlash(asset.Path)
ignoring := false
for _, re := range ignore {
if re.MatchString(asset.Path) {
ignoring = true
break
}
}
if ignoring {
continue
}
if file.IsDir() {
if recursive {
recursivePath := filepath.Join(dir, file.Name())
visitedPaths[asset.Path] = true
findFiles(recursivePath, prefix, recursive, toc, ignore, knownFuncs, visitedPaths)
}
continue
} else if file.Mode()&os.ModeSymlink == os.ModeSymlink {
var linkPath string
if linkPath, err = os.Readlink(asset.Path); err != nil {
return err
}
if !filepath.IsAbs(linkPath) {
if linkPath, err = filepath.Abs(dirpath + "/" + linkPath); err != nil {
return err
}
}
if _, ok := visitedPaths[linkPath]; !ok {
visitedPaths[linkPath] = true
findFiles(asset.Path, prefix, recursive, toc, ignore, knownFuncs, visitedPaths)
}
continue
}
if strings.HasPrefix(asset.Name, prefix) {
asset.Name = asset.Name[len(prefix):]
} else {
asset.Name = filepath.Join(dir, file.Name())
}
// If we have a leading slash, get rid of it.
if len(asset.Name) > 0 && asset.Name[0] == '/' {
asset.Name = asset.Name[1:]
}
// This shouldn't happen.
if len(asset.Name) == 0 {
return fmt.Errorf("Invalid file: %v", asset.Path)
}
asset.Func = safeFunctionName(asset.Name, knownFuncs)
asset.Path, _ = filepath.Abs(asset.Path)
*toc = append(*toc, asset)
}
return nil
}
var regFuncName = regexp.MustCompile(`[^a-zA-Z0-9_]`)
// safeFunctionName converts the given name into a name
// which qualifies as a valid function identifier. It
// also compares against a known list of functions to
// prevent conflict based on name translation.
func safeFunctionName(name string, knownFuncs map[string]int) string {
var inBytes, outBytes []byte
var toUpper bool
name = strings.ToLower(name)
inBytes = []byte(name)
for i := 0; i < len(inBytes); i++ {
if regFuncName.Match([]byte{inBytes[i]}) {
toUpper = true
} else if toUpper {
outBytes = append(outBytes, []byte(strings.ToUpper(string(inBytes[i])))...)
toUpper = false
} else {
outBytes = append(outBytes, inBytes[i])
}
}
name = string(outBytes)
// Identifier can't start with a digit.
if unicode.IsDigit(rune(name[0])) {
name = "_" + name
}
if num, ok := knownFuncs[name]; ok {
knownFuncs[name] = num + 1
name = fmt.Sprintf("%s%d", name, num)
} else {
knownFuncs[name] = 2
}
return name
}
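As a rough illustration of the translation rules above, a hypothetical example test (not part of the vendored file; it assumes it lives in package bindata) could look like this:
package bindata
import "fmt"
func Example_safeFunctionName() {
	known := map[string]int{}
	// Non-alphanumeric characters are dropped and the following character is
	// uppercased; a leading digit gets an underscore prefix; a repeated name
	// gets a numeric suffix.
	fmt.Println(safeFunctionName("data/foo-bar.json", known))
	fmt.Println(safeFunctionName("2fa/icon.png", known))
	fmt.Println(safeFunctionName("data/foo-bar.json", known))
	// Output:
	// dataFooBarJson
	// _2faIconPng
	// dataFooBarJson2
}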

108
vendor/github.com/go-bindata/go-bindata/debug.go generated vendored Normal file

@ -0,0 +1,108 @@
// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0/
package bindata
import (
"fmt"
"io"
)
// writeDebug writes the debug code file.
func writeDebug(w io.Writer, c *Config, toc []Asset) error {
err := writeDebugHeader(w, c)
if err != nil {
return err
}
err = writeAssetFS(w, c)
if err != nil {
return err
}
for i := range toc {
err = writeDebugAsset(w, c, &toc[i])
if err != nil {
return err
}
}
return nil
}
// writeDebugHeader writes output file headers.
// This targets debug builds.
func writeDebugHeader(w io.Writer, c *Config) error {
var header string
if c.HttpFileSystem {
header = `import (
"bytes"
"net/http"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"`
} else {
header = `import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"`
}
_, err := fmt.Fprintf(w, `%s
)
// bindataRead reads the given file from disk. It returns an error on failure.
func bindataRead(path, name string) ([]byte, error) {
buf, err := ioutil.ReadFile(path)
if err != nil {
err = fmt.Errorf("Error reading asset %%s at %%s: %%v", name, path, err)
}
return buf, err
}
type asset struct {
bytes []byte
info os.FileInfo
}
`, header)
return err
}
// writeDebugAsset writes a debug entry for the given asset.
// A debug entry is simply a function which reads the asset from
// the original file (e.g.: from disk).
func writeDebugAsset(w io.Writer, c *Config, asset *Asset) error {
pathExpr := fmt.Sprintf("%q", asset.Path)
if c.Dev {
pathExpr = fmt.Sprintf("filepath.Join(rootDir, %q)", asset.Name)
}
_, err := fmt.Fprintf(w, `// %s reads file data from disk. It returns an error on failure.
func %s() (*asset, error) {
path := %s
name := %q
bytes, err := bindataRead(path, name)
if err != nil {
return nil, err
}
fi, err := os.Stat(path)
if err != nil {
err = fmt.Errorf("Error reading asset info %%s at %%s: %%v", name, path, err)
}
a := &asset{bytes: bytes, info: fi}
return a, err
}
`, asset.Func, asset.Func, pathExpr, asset.Name)
return err
}

129
vendor/github.com/go-bindata/go-bindata/doc.go generated vendored Normal file

@ -0,0 +1,129 @@
// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0/
/*
bindata converts any file into manageable Go source code. Useful for
embedding binary data into a Go program. The file data is optionally gzip
compressed before being converted to a raw byte slice.
The following paragraphs cover some of the customization options
which can be specified in the Config struct, which must be passed into
the Translate() call.
Debug vs Release builds
When used with the `Debug` option, the generated code does not actually include
the asset data. Instead, it generates function stubs which load the data from
the original file on disk. The asset API remains identical between debug and
release builds, so your code will not have to change.
This is useful during development when you expect the assets to change often.
The host application using these assets uses the same API in both cases and
will not have to care where the actual data comes from.
An example is a Go webserver with some embedded, static web content like
HTML, JS and CSS files. While developing it, you do not want to rebuild the
whole server and restart it every time you make a change to a bit of
JavaScript. You just want to build and launch the server once. Then just press
refresh in the browser to see those changes. Embedding the assets with the
`debug` flag allows you to do just that. When you are finished developing and
ready for deployment, just re-invoke `go-bindata` without the `-debug` flag.
It will now embed the latest version of the assets.
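For example, a typical pair of invocations might look like this (the paths,
package name and output file are placeholders, not prescribed values):
$ go-bindata -debug -pkg assets -o assets/bindata.go data/...
$ go-bindata -pkg assets -o assets/bindata.go data/...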
Lower memory footprint
The `NoMemCopy` option will alter the way the output file is generated.
It will employ a hack that allows us to read the file data directly from
the compiled program's `.rodata` section. This ensures that when we call
our generated function, we omit unnecessary memcopies.
The downside of this is that it requires dependencies on the `reflect` and
`unsafe` packages. These may be restricted on platforms like AppEngine and
thus prevent you from using this mode.
Another disadvantage is that the byte slice we create is strictly read-only.
For most use cases this is not a problem, but if you ever try to alter the
returned byte slice, a runtime panic occurs. Use this mode only on target
platforms where memory constraints are an issue.
The default behaviour is to use the old code generation method. This
prevents the two previously mentioned issues, but will employ at least one
extra memcopy and thus increase memory requirements.
For instance, consider the following two examples:
This would be the default mode, using an extra memcopy but gives a safe
implementation without dependencies on `reflect` and `unsafe`:
func myfile() []byte {
return []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a}
}
Here is the same functionality, but uses the `.rodata` hack.
The byte slice returned from this example cannot be written to without
generating a runtime error.
var _myfile = "\x89\x50\x4e\x47\x0d\x0a\x1a"
func myfile() []byte {
var empty [0]byte
sx := (*reflect.StringHeader)(unsafe.Pointer(&_myfile))
b := empty[:]
bx := (*reflect.SliceHeader)(unsafe.Pointer(&b))
bx.Data = sx.Data
bx.Len = len(_myfile)
bx.Cap = bx.Len
return b
}
Optional compression
The NoCompress option indicates that the supplied assets are *not* GZIP
compressed before being turned into Go code. The data should still be accessed
through a function call, so nothing changes in the API.
This feature is useful if you do not care for compression, or the supplied
resource is already compressed. Doing it again would not add any value and may
even increase the size of the data.
The default behaviour of the program is to use compression.
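For instance, a hypothetical invocation that skips compression for already
compressed resources might be:
$ go-bindata -nocompress -pkg assets -o assets/bindata.go images/...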
Path prefix stripping
The keys used in the `_bindata` map are the same as the input file name
passed to `go-bindata`. This includes the path. In most cases, this is not
desirable, as it puts potentially sensitive information in your code base.
For this purpose, the tool supplies another command line flag `-prefix`.
This accepts a portion of a path name, which should be stripped off from
the map keys and function names.
For example, running without the `-prefix` flag, we get:
$ go-bindata /path/to/templates/
_bindata["/path/to/templates/foo.html"] = path_to_templates_foo_html
Running with the `-prefix` flag, we get:
$ go-bindata -prefix "/path/to/" /path/to/templates/
_bindata["templates/foo.html"] = templates_foo_html
Build tags
With the optional Tags field, you can specify any go build tags that
must be fulfilled for the output file to be included in a build. This
is useful when including binary data in multiple formats, where the desired
format is specified at build time with the appropriate tags.
The tags are appended to a `// +build` line in the beginning of the output file
and must follow the build tags syntax specified by the go tool.
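For example (with a placeholder tag name):
$ go-bindata -tags "embed_assets" -pkg assets -o assets/bindata.go data/...
would place a `// +build embed_assets` constraint at the top of the generated file.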
*/
package bindata
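As a consumer-side sketch of the API this package generates (see toc.go later in this diff), the import path "example.com/myproject/assets" and the asset names below are assumptions, not part of this vendor update:
package main
import (
	"fmt"
	"log"
	"example.com/myproject/assets" // hypothetical package produced by go-bindata
)
func main() {
	// MustAsset panics if the asset is missing; Asset returns an error instead.
	cfg := assets.MustAsset("config/app.yaml")
	fmt.Printf("embedded %d bytes\n", len(cfg))
	// AssetDir lists the entries embedded directly under a directory.
	names, err := assets.AssetDir("config")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(names)
}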

102
vendor/github.com/go-bindata/go-bindata/file.go generated vendored Normal file

@ -0,0 +1,102 @@
package bindata
import (
"fmt"
"io"
)
func writeAssetFS(w io.Writer, c *Config) error {
if !c.HttpFileSystem {
return nil
}
_, err := fmt.Fprintf(w, `
type assetFile struct {
*bytes.Reader
name string
childInfos []os.FileInfo
childInfoOffset int
}
type assetOperator struct{}
// Open implements the http.FileSystem interface
func (f *assetOperator) Open(name string) (http.File, error) {
var err error
if len(name) > 0 && name[0] == '/' {
name = name[1:]
}
content, err := Asset(name)
if err == nil {
return &assetFile{name: name, Reader: bytes.NewReader(content)}, nil
}
children, err := AssetDir(name)
if err == nil {
childInfos := make([]os.FileInfo, 0, len(children))
for _, child := range children {
childPath := filepath.Join(name, child)
info, errInfo := AssetInfo(filepath.Join(name, child))
if errInfo == nil {
childInfos = append(childInfos, info)
} else {
childInfos = append(childInfos, newDirFileInfo(childPath))
}
}
return &assetFile{name: name, childInfos: childInfos}, nil
} else {
// If the asset was not found, return os.ErrNotExist so the
// server responds with a 404 instead of a 500 for missing files.
if strings.Contains(err.Error(), "not found") {
return nil, os.ErrNotExist
}
return nil, err
}
}
// Close is a no-op; there is nothing to release for an in-memory asset
func (f *assetFile) Close() error {
return nil
}
// Readdir returns the file info of the directory's children
func (f *assetFile) Readdir(count int) ([]os.FileInfo, error) {
if len(f.childInfos) == 0 {
return nil, os.ErrNotExist
}
if count <= 0 {
return f.childInfos, nil
}
if f.childInfoOffset+count > len(f.childInfos) {
count = len(f.childInfos) - f.childInfoOffset
}
offset := f.childInfoOffset
f.childInfoOffset += count
return f.childInfos[offset : offset+count], nil
}
// Stat returns the file info for this asset
func (f *assetFile) Stat() (os.FileInfo, error) {
if len(f.childInfos) != 0 {
return newDirFileInfo(f.name), nil
}
return AssetInfo(f.name)
}
// newDirFileInfo returns a default file info for a directory
func newDirFileInfo(name string) os.FileInfo {
return &bindataFileInfo{
name: name,
size: 0,
mode: os.FileMode(2147484068), // equal os.FileMode(0644)|os.ModeDir
modTime: time.Time{}}
}
// AssetFile returns an http.FileSystem instance backed by the embedded assets
func AssetFile() http.FileSystem {
return &assetOperator{}
}
`)
return err
}
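A minimal sketch of serving the embedded assets over HTTP when the code was generated with the -fs flag (the import path and listen address are assumptions):
package main
import (
	"log"
	"net/http"
	"example.com/myproject/assets" // hypothetical package generated with go-bindata -fs
)
func main() {
	// AssetFile() is the http.FileSystem emitted by writeAssetFS above.
	http.Handle("/", http.FileServer(assets.AssetFile()))
	log.Fatal(http.ListenAndServe(":8080", nil))
}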


@ -0,0 +1 @@
go-bindata


@ -0,0 +1,22 @@
package main
import "strings"
// borrowed from https://github.com/hashicorp/serf/blob/master/command/agent/flag_slice_value.go
// AppendSliceValue implements the flag.Value interface and allows multiple
// calls to the same variable to append a list.
type AppendSliceValue []string
func (s *AppendSliceValue) String() string {
return strings.Join(*s, ",")
}
func (s *AppendSliceValue) Set(value string) error {
if *s == nil {
*s = make([]string, 0, 1)
}
*s = append(*s, value)
return nil
}
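A small sketch of the accumulation behaviour, written as a hypothetical example test alongside the file above (it assumes the AppendSliceValue type is in scope):
package main
import (
	"flag"
	"fmt"
	"log"
)
func Example_appendSliceValue() {
	var patterns AppendSliceValue
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.Var(&patterns, "ignore", "Regex pattern to ignore")
	// Every repeated -ignore appends another entry to the slice.
	if err := fs.Parse([]string{"-ignore", `\.gitignore$`, "-ignore", "testdata"}); err != nil {
		log.Fatal(err)
	}
	fmt.Println(patterns.String())
	// Output:
	// \.gitignore$,testdata
}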


@ -0,0 +1,108 @@
// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0/
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/go-bindata/go-bindata"
)
func main() {
cfg := parseArgs()
err := bindata.Translate(cfg)
if err != nil {
fmt.Fprintf(os.Stderr, "bindata: %v\n", err)
os.Exit(1)
}
}
// parseArgs creates a new, filled configuration instance
// by reading and parsing command line options.
//
// This function exits the program with an error, if
// any of the command line options are incorrect.
func parseArgs() *bindata.Config {
var version bool
c := bindata.NewConfig()
flag.Usage = func() {
fmt.Printf("Usage: %s [options] <input directories>\n\n", os.Args[0])
flag.PrintDefaults()
}
flag.BoolVar(&c.Debug, "debug", c.Debug, "Do not embed the assets, but provide the embedding API. Contents will still be loaded from disk.")
flag.BoolVar(&c.Dev, "dev", c.Dev, "Similar to debug, but does not emit absolute paths. Expects a rootDir variable to already exist in the generated code's package.")
flag.StringVar(&c.Tags, "tags", c.Tags, "Optional set of build tags to include.")
flag.StringVar(&c.Prefix, "prefix", c.Prefix, "Optional path prefix to strip off asset names.")
flag.StringVar(&c.Package, "pkg", c.Package, "Package name to use in the generated code.")
flag.BoolVar(&c.NoMemCopy, "nomemcopy", c.NoMemCopy, "Use a .rodata hack to get rid of unnecessary memcopies. Refer to the documentation to see what implications this carries.")
flag.BoolVar(&c.NoCompress, "nocompress", c.NoCompress, "Assets will *not* be GZIP compressed when this flag is specified.")
flag.BoolVar(&c.NoMetadata, "nometadata", c.NoMetadata, "Assets will not preserve size, mode, and modtime info.")
flag.BoolVar(&c.HttpFileSystem, "fs", c.HttpFileSystem, "Whether to generate code implementing the http.FileSystem interface.")
flag.UintVar(&c.Mode, "mode", c.Mode, "Optional file mode override for all files.")
flag.Int64Var(&c.ModTime, "modtime", c.ModTime, "Optional modification unix timestamp override for all files.")
flag.StringVar(&c.Output, "o", c.Output, "Optional name of the output file to be generated.")
flag.BoolVar(&version, "version", false, "Displays version information.")
ignore := make([]string, 0)
flag.Var((*AppendSliceValue)(&ignore), "ignore", "Regex pattern to ignore")
flag.Parse()
patterns := make([]*regexp.Regexp, 0)
for _, pattern := range ignore {
patterns = append(patterns, regexp.MustCompile(pattern))
}
c.Ignore = patterns
if version {
fmt.Printf("%s\n", Version())
os.Exit(0)
}
// Make sure we have input paths.
if flag.NArg() == 0 {
fmt.Fprintf(os.Stderr, "Missing <input dir>\n\n")
flag.Usage()
os.Exit(1)
}
// Create input configurations.
c.Input = make([]bindata.InputConfig, flag.NArg())
for i := range c.Input {
c.Input[i] = parseInput(flag.Arg(i))
}
return c
}
// parseInput determines whether the given path has a recursive indicator and
// returns an InputConfig with the indicator chopped off if it does.
//
// ex:
// /path/to/foo/... -> (/path/to/foo, true)
// /path/to/bar -> (/path/to/bar, false)
func parseInput(path string) bindata.InputConfig {
if strings.HasSuffix(path, "/...") {
return bindata.InputConfig{
Path: filepath.Clean(path[:len(path)-4]),
Recursive: true,
}
} else {
return bindata.InputConfig{
Path: filepath.Clean(path),
Recursive: false,
}
}
}
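The same configuration can also be driven programmatically instead of via flags; a minimal sketch using the fields exercised above (the paths and package name are placeholders):
package main
import (
	"log"
	"github.com/go-bindata/go-bindata"
)
func main() {
	cfg := bindata.NewConfig()
	cfg.Package = "assets"
	cfg.Output = "assets/bindata.go"
	cfg.Prefix = "data/"
	cfg.Input = []bindata.InputConfig{
		{Path: "data/templates", Recursive: true}, // equivalent to passing data/templates/...
		{Path: "data/VERSION", Recursive: false},  // a single-file input
	}
	if err := bindata.Translate(cfg); err != nil {
		log.Fatalf("bindata: %v", err)
	}
}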


@ -0,0 +1,31 @@
// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0/
package main
import (
"fmt"
"runtime"
)
const (
AppName = "go-bindata"
AppVersionMajor = 3
AppVersionMinor = 1
)
// revision part of the program version.
// This will be set automatically at build time like so:
//
// go build -ldflags "-X main.AppVersionRev `date -u +%s`"
var AppVersionRev string
func Version() string {
if len(AppVersionRev) == 0 {
AppVersionRev = "2"
}
return fmt.Sprintf("%s %d.%d.%s (Go runtime %s).\nCopyright (c) 2010-2013, Jim Teeuwen.",
AppName, AppVersionMajor, AppVersionMinor, AppVersionRev, runtime.Version())
}

473
vendor/github.com/go-bindata/go-bindata/release.go generated vendored Normal file

@ -0,0 +1,473 @@
// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0/
package bindata
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"unicode/utf8"
)
// writeRelease writes the release code file.
func writeRelease(w io.Writer, c *Config, toc []Asset) error {
err := writeReleaseHeader(w, c)
if err != nil {
return err
}
err = writeAssetFS(w, c)
if err != nil {
return err
}
for i := range toc {
err = writeReleaseAsset(w, c, &toc[i])
if err != nil {
return err
}
}
return nil
}
// writeReleaseHeader writes output file headers.
// This targets release builds.
func writeReleaseHeader(w io.Writer, c *Config) error {
var err error
if c.NoCompress {
if c.NoMemCopy {
err = header_uncompressed_nomemcopy(w, c)
} else {
err = header_uncompressed_memcopy(w, c)
}
} else {
if c.NoMemCopy {
err = header_compressed_nomemcopy(w, c)
} else {
err = header_compressed_memcopy(w, c)
}
}
if err != nil {
return err
}
return header_release_common(w)
}
// writeReleaseAsset writes a release entry for the given asset.
// A release entry is a function which embeds and returns
// the file's byte content.
func writeReleaseAsset(w io.Writer, c *Config, asset *Asset) error {
fd, err := os.Open(asset.Path)
if err != nil {
return err
}
defer fd.Close()
if c.NoCompress {
if c.NoMemCopy {
err = uncompressed_nomemcopy(w, asset, fd)
} else {
err = uncompressed_memcopy(w, asset, fd)
}
} else {
if c.NoMemCopy {
err = compressed_nomemcopy(w, asset, fd)
} else {
err = compressed_memcopy(w, asset, fd)
}
}
if err != nil {
return err
}
return asset_release_common(w, c, asset)
}
// sanitize prepares a valid UTF-8 string as a raw string constant.
// Based on https://code.google.com/p/go/source/browse/godoc/static/makestatic.go?repo=tools
func sanitize(b []byte) []byte {
// Replace ` with `+"`"+`
b = bytes.Replace(b, []byte("`"), []byte("`+\"`\"+`"), -1)
// Replace BOM with `+"\xEF\xBB\xBF"+`
// (A BOM is valid UTF-8 but not permitted in Go source files.
// I wouldn't bother handling this, but for some insane reason
// jquery.js has a BOM somewhere in the middle.)
return bytes.Replace(b, []byte("\xEF\xBB\xBF"), []byte("`+\"\\xEF\\xBB\\xBF\"+`"), -1)
}
func header_compressed_nomemcopy(w io.Writer, c *Config) error {
var header string
if c.HttpFileSystem {
header = `import (
"bytes"
"compress/gzip"
"fmt"
"net/http"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"`
} else {
header = `import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"`
}
_, err := fmt.Fprintf(w, `%s
)
func bindataRead(data, name string) ([]byte, error) {
gz, err := gzip.NewReader(strings.NewReader(data))
if err != nil {
return nil, fmt.Errorf("Read %%q: %%v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("Read %%q: %%v", name, err)
}
if clErr != nil {
return nil, err
}
return buf.Bytes(), nil
}
`, header)
return err
}
func header_compressed_memcopy(w io.Writer, c *Config) error {
var header string
if c.HttpFileSystem {
header = `import (
"bytes"
"compress/gzip"
"fmt"
"net/http"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"`
} else {
header = `import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"`
}
_, err := fmt.Fprintf(w, `%s
)
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("Read %%q: %%v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("Read %%q: %%v", name, err)
}
if clErr != nil {
return nil, err
}
return buf.Bytes(), nil
}
`, header)
return err
}
func header_uncompressed_nomemcopy(w io.Writer, c *Config) error {
var header string
if c.HttpFileSystem {
header = `import (
"bytes"
"fmt"
"net/http"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"time"
"unsafe"`
} else {
header = `import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"time"
"unsafe"`
}
_, err := fmt.Fprintf(w, `%s
)
func bindataRead(data, name string) ([]byte, error) {
var empty [0]byte
sx := (*reflect.StringHeader)(unsafe.Pointer(&data))
b := empty[:]
bx := (*reflect.SliceHeader)(unsafe.Pointer(&b))
bx.Data = sx.Data
bx.Len = len(data)
bx.Cap = bx.Len
return b, nil
}
`, header)
return err
}
func header_uncompressed_memcopy(w io.Writer, c *Config) error {
var header string
if c.HttpFileSystem {
header = `import (
"bytes"
"fmt"
"net/http"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"`
} else {
header = `import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"`
}
_, err := fmt.Fprintf(w, `%s
)
`, header)
return err
}
func header_release_common(w io.Writer) error {
_, err := fmt.Fprintf(w, `type asset struct {
bytes []byte
info os.FileInfo
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
// Name returns the file name
func (fi bindataFileInfo) Name() string {
return fi.name
}
// Size returns the file size
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
// Mode returns the file mode
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
// ModTime returns the file modification time
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
// IsDir reports whether the file is a directory
func (fi bindataFileInfo) IsDir() bool {
return fi.mode&os.ModeDir != 0
}
// Sys returns nil; there is no underlying data source
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
`)
return err
}
func compressed_nomemcopy(w io.Writer, asset *Asset, r io.Reader) error {
_, err := fmt.Fprintf(w, `var _%s = "`, asset.Func)
if err != nil {
return err
}
gz := gzip.NewWriter(&StringWriter{Writer: w})
_, err = io.Copy(gz, r)
gz.Close()
if err != nil {
return err
}
_, err = fmt.Fprintf(w, `"
func %sBytes() ([]byte, error) {
return bindataRead(
_%s,
%q,
)
}
`, asset.Func, asset.Func, asset.Name)
return err
}
func compressed_memcopy(w io.Writer, asset *Asset, r io.Reader) error {
_, err := fmt.Fprintf(w, `var _%s = []byte("`, asset.Func)
if err != nil {
return err
}
gz := gzip.NewWriter(&StringWriter{Writer: w})
_, err = io.Copy(gz, r)
gz.Close()
if err != nil {
return err
}
_, err = fmt.Fprintf(w, `")
func %sBytes() ([]byte, error) {
return bindataRead(
_%s,
%q,
)
}
`, asset.Func, asset.Func, asset.Name)
return err
}
func uncompressed_nomemcopy(w io.Writer, asset *Asset, r io.Reader) error {
_, err := fmt.Fprintf(w, `var _%s = "`, asset.Func)
if err != nil {
return err
}
_, err = io.Copy(&StringWriter{Writer: w}, r)
if err != nil {
return err
}
_, err = fmt.Fprintf(w, `"
func %sBytes() ([]byte, error) {
return bindataRead(
_%s,
%q,
)
}
`, asset.Func, asset.Func, asset.Name)
return err
}
func uncompressed_memcopy(w io.Writer, asset *Asset, r io.Reader) error {
_, err := fmt.Fprintf(w, `var _%s = []byte(`, asset.Func)
if err != nil {
return err
}
b, err := ioutil.ReadAll(r)
if err != nil {
return err
}
if utf8.Valid(b) && !bytes.Contains(b, []byte{0}) {
fmt.Fprintf(w, "`%s`", sanitize(b))
} else {
fmt.Fprintf(w, "%+q", b)
}
_, err = fmt.Fprintf(w, `)
func %sBytes() ([]byte, error) {
return _%s, nil
}
`, asset.Func, asset.Func)
return err
}
func asset_release_common(w io.Writer, c *Config, asset *Asset) error {
fi, err := os.Stat(asset.Path)
if err != nil {
return err
}
mode := uint(fi.Mode())
modTime := fi.ModTime().Unix()
size := fi.Size()
if c.NoMetadata {
mode = 0
modTime = 0
size = 0
}
if c.Mode > 0 {
mode = uint(os.ModePerm) & c.Mode
}
if c.ModTime > 0 {
modTime = c.ModTime
}
_, err = fmt.Fprintf(w, `func %s() (*asset, error) {
bytes, err := %sBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: %q, size: %d, mode: os.FileMode(%d), modTime: time.Unix(%d, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
`, asset.Func, asset.Func, asset.Name, size, mode, modTime)
return err
}

62
vendor/github.com/go-bindata/go-bindata/restore.go generated vendored Normal file

@ -0,0 +1,62 @@
// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0/
package bindata
import (
"fmt"
"io"
)
func writeRestore(w io.Writer) error {
_, err := fmt.Fprintf(w, `
// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
}
// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}
`)
return err
}
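A consumer-side sketch of the restore helpers generated above (the package name and paths are assumptions):
package main
import (
	"log"
	"example.com/myproject/assets" // hypothetical generated package
)
func main() {
	// Write the embedded "data" tree back out under /tmp/extracted,
	// preserving each asset's recorded file mode.
	if err := assets.RestoreAssets("/tmp/extracted", "data"); err != nil {
		log.Fatal(err)
	}
}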


@ -0,0 +1,36 @@
// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0/
package bindata
import (
"io"
)
const lowerHex = "0123456789abcdef"
type StringWriter struct {
io.Writer
c int
}
func (w *StringWriter) Write(p []byte) (n int, err error) {
if len(p) == 0 {
return
}
buf := []byte(`\x00`)
var b byte
for n, b = range p {
buf[2] = lowerHex[b/16]
buf[3] = lowerHex[b%16]
w.Writer.Write(buf)
w.c++
}
n++
return
}
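A hypothetical example test (assumed to live in package bindata) showing the escaping performed by StringWriter:
package bindata
import (
	"bytes"
	"fmt"
)
func Example_stringWriter() {
	var buf bytes.Buffer
	// Every input byte is written out as a \xNN escape suitable for
	// embedding inside a generated Go string literal.
	_, _ = (&StringWriter{Writer: &buf}).Write([]byte("Go"))
	fmt.Println(buf.String())
	// Output:
	// \x47\x6f
}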

288
vendor/github.com/go-bindata/go-bindata/toc.go generated vendored Normal file

@ -0,0 +1,288 @@
// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0/
package bindata
import (
"fmt"
"io"
"sort"
"strings"
)
type assetTree struct {
Asset Asset
Children map[string]*assetTree
}
func newAssetTree() *assetTree {
tree := &assetTree{}
tree.Children = make(map[string]*assetTree)
return tree
}
func (node *assetTree) child(name string) *assetTree {
rv, ok := node.Children[name]
if !ok {
rv = newAssetTree()
node.Children[name] = rv
}
return rv
}
func (root *assetTree) Add(route []string, asset Asset) {
for _, name := range route {
root = root.child(name)
}
root.Asset = asset
}
func ident(w io.Writer, n int) {
for i := 0; i < n; i++ {
w.Write([]byte{'\t'})
}
}
func (root *assetTree) funcOrNil() string {
if root.Asset.Func == "" {
return "nil"
} else {
return root.Asset.Func
}
}
func getFillerSize(tokenIndex int, lengths []int, nident int) int {
var (
curlen int = lengths[tokenIndex]
maxlen int = 0
substart int = 0
subend int = 0
spacediff int = 0
)
if curlen > 0 {
substart = tokenIndex
for (substart-1) >= 0 && lengths[substart-1] > 0 {
substart -= 1
}
subend = tokenIndex
for (subend+1) < len(lengths) && lengths[subend+1] > 0 {
subend += 1
}
var candidate int
for j := substart; j <= subend; j += 1 {
candidate = lengths[j]
if candidate > maxlen {
maxlen = candidate
}
}
spacediff = maxlen - curlen
}
return spacediff
}
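A small worked example of the padding computation, as a hypothetical test in package bindata (the lengths are illustrative):
package bindata
import "fmt"
func Example_getFillerSize() {
	// A zero length marks an entry that has children and therefore no aligned
	// name column; only the contiguous non-zero run around the index is
	// considered when computing the padding.
	lengths := []int{8, 3, 0, 5}
	fmt.Println(getFillerSize(1, lengths, 0)) // max(8, 3) - 3
	// Output:
	// 5
}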
func (root *assetTree) writeGoMap(w io.Writer, nident int) {
fmt.Fprintf(w, "&bintree{%s, map[string]*bintree{", root.funcOrNil())
if len(root.Children) > 0 {
io.WriteString(w, "\n")
// Sort to make output stable between invocations
filenames := make([]string, len(root.Children))
hasChildren := make(map[string]bool)
i := 0
for filename, node := range root.Children {
filenames[i] = filename
hasChildren[filename] = len(node.Children) > 0
i++
}
sort.Strings(filenames)
lengths := make([]int, len(root.Children))
for i, filename := range filenames {
if hasChildren[filename] {
lengths[i] = 0
} else {
lengths[i] = len(filename)
}
}
for i, p := range filenames {
ident(w, nident+1)
filler := strings.Repeat(" ", getFillerSize(i, lengths, nident))
fmt.Fprintf(w, `"%s": %s`, p, filler)
root.Children[p].writeGoMap(w, nident+1)
}
ident(w, nident)
}
io.WriteString(w, "}}")
if nident > 0 {
io.WriteString(w, ",")
}
io.WriteString(w, "\n")
}
func (root *assetTree) WriteAsGoMap(w io.Writer) error {
_, err := fmt.Fprint(w, `type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = `)
root.writeGoMap(w, 0)
return err
}
func writeTOCTree(w io.Writer, toc []Asset) error {
_, err := fmt.Fprintf(w, `// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
cannonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(cannonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %%s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %%s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
`)
if err != nil {
return err
}
tree := newAssetTree()
for i := range toc {
pathList := strings.Split(toc[i].Name, "/")
tree.Add(pathList, toc[i])
}
return tree.WriteAsGoMap(w)
}
// writeTOC writes the table of contents file.
func writeTOC(w io.Writer, toc []Asset) error {
err := writeTOCHeader(w)
if err != nil {
return err
}
var maxlen = 0
for i := range toc {
l := len(toc[i].Name)
if l > maxlen {
maxlen = l
}
}
for i := range toc {
err = writeTOCAsset(w, &toc[i], maxlen)
if err != nil {
return err
}
}
return writeTOCFooter(w)
}
// writeTOCHeader writes the table of contents file header.
func writeTOCHeader(w io.Writer) error {
_, err := fmt.Fprintf(w, `// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %%s can't read by error: %%v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %%s not found", name)
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %%s can't read by error: %%v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %%s not found", name)
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
`)
return err
}
// writeTOCAsset writes a TOC entry for the given asset.
func writeTOCAsset(w io.Writer, asset *Asset, maxlen int) error {
spacediff := maxlen - len(asset.Name)
filler := strings.Repeat(" ", spacediff)
_, err := fmt.Fprintf(w, "\t%q: %s%s,\n", asset.Name, filler, asset.Func)
return err
}
// writeTOCFooter writes the table of contents file footer.
func writeTOCFooter(w io.Writer) error {
_, err := fmt.Fprintf(w, `}
`)
return err
}


@ -1,33 +0,0 @@
package rke
import (
v3 "github.com/rancher/types/apis/management.cattle.io/v3"
)
func loadCisConfigParams() map[string]v3.CisConfigParams {
return map[string]v3.CisConfigParams{
"default": {
BenchmarkVersion: "rke-cis-1.4",
},
"v1.15": {
BenchmarkVersion: "rke-cis-1.4",
},
}
}
func loadCisBenchmarkVersionInfo() map[string]v3.CisBenchmarkVersionInfo {
return map[string]v3.CisBenchmarkVersionInfo{
"rke-cis-1.4": {
MinKubernetesVersion: "1.13",
},
"cis-1.4": {
MinKubernetesVersion: "1.13",
},
"rke-cis-1.5": {
MinKubernetesVersion: "1.15",
},
"cis-1.5": {
MinKubernetesVersion: "1.15",
},
}
}


@ -1,179 +0,0 @@
package rke
import (
"bytes"
"encoding/json"
"fmt"
"github.com/blang/semver"
"github.com/rancher/kontainer-driver-metadata/rke/templates"
"github.com/sirupsen/logrus"
"os"
"strings"
"github.com/rancher/types/apis/management.cattle.io/v3"
"github.com/rancher/types/image"
)
const (
rkeDataFilePath = "./data/data.json"
)
// Data to be written in dataFilePath, dynamically populated on init() with the latest versions
type Data struct {
// K8sVersionServiceOptions - service options per k8s version
K8sVersionServiceOptions map[string]v3.KubernetesServicesOptions
K8sVersionRKESystemImages map[string]v3.RKESystemImages
// Addon Templates per K8s version ("default" where nothing changes for k8s version)
K8sVersionedTemplates map[string]map[string]string
// K8sVersionInfo - min/max RKE+Rancher versions per k8s version
K8sVersionInfo map[string]v3.K8sVersionInfo
//Default K8s version for every rancher version
RancherDefaultK8sVersions map[string]string
//Default K8s version for every rke version
RKEDefaultK8sVersions map[string]string
K8sVersionDockerInfo map[string][]string
// K8sVersionWindowsServiceOptions - service options per windows k8s version
K8sVersionWindowsServiceOptions map[string]v3.KubernetesServicesOptions
CisConfigParams map[string]v3.CisConfigParams
CisBenchmarkVersionInfo map[string]v3.CisBenchmarkVersionInfo
}
var (
DriverData Data
TemplateData map[string]map[string]string
MissedTemplate map[string][]string
m = image.Mirror
)
func init() {
DriverData = Data{
K8sVersionRKESystemImages: loadK8sRKESystemImages(),
}
for version, images := range DriverData.K8sVersionRKESystemImages {
longName := "rancher/hyperkube:" + version
if !strings.HasPrefix(longName, images.Kubernetes) {
panic(fmt.Sprintf("For K8s version %s, the Kubernetes image tag should be a substring of %s, currently it is %s", version, version, images.Kubernetes))
}
}
DriverData.RKEDefaultK8sVersions = loadRKEDefaultK8sVersions()
DriverData.RancherDefaultK8sVersions = loadRancherDefaultK8sVersions()
validateDefaultPresent(DriverData.RKEDefaultK8sVersions)
DriverData.K8sVersionedTemplates = templates.LoadK8sVersionedTemplates()
validateTemplateMatch()
DriverData.K8sVersionServiceOptions = loadK8sVersionServiceOptions()
DriverData.K8sVersionInfo = loadK8sVersionInfo()
// init Windows versions
DriverData.K8sVersionWindowsServiceOptions = loadK8sVersionWindowsServiceOptions()
DriverData.K8sVersionDockerInfo = loadK8sVersionDockerInfo()
// CIS
DriverData.CisConfigParams = loadCisConfigParams()
DriverData.CisBenchmarkVersionInfo = loadCisBenchmarkVersionInfo()
}
func validateDefaultPresent(versions map[string]string) {
for _, defaultK8s := range versions {
if _, ok := DriverData.K8sVersionRKESystemImages[defaultK8s]; !ok {
panic(fmt.Sprintf("Default K8s version %v is not found in the K8sVersionToRKESystemImages", defaultK8s))
}
}
}
func validateTemplateMatch() {
TemplateData = map[string]map[string]string{}
MissedTemplate = map[string][]string{}
for k8sVersion := range DriverData.K8sVersionRKESystemImages {
toMatch, err := semver.Make(k8sVersion[1:])
if err != nil {
panic(fmt.Sprintf("k8sVersion not sem-ver %s %v", k8sVersion, err))
}
TemplateData[k8sVersion] = map[string]string{}
for plugin, pluginData := range DriverData.K8sVersionedTemplates {
if plugin == templates.TemplateKeys {
continue
}
matchedKey := ""
matchedRange := ""
for toTestRange, key := range pluginData {
testRange, err := semver.ParseRange(toTestRange)
if err != nil {
panic(fmt.Sprintf("range for %s not sem-ver %v %v", plugin, testRange, err))
}
if testRange(toMatch) {
// only one range should be matched
if matchedKey != "" {
panic(fmt.Sprintf("k8sVersion %s for plugin %s passing range %s, conflict range matching with %s",
k8sVersion, plugin, toTestRange, matchedRange))
}
matchedKey = key
matchedRange = toTestRange
}
}
if matchedKey == "" {
if val, ok := MissedTemplate[plugin]; ok {
val = append(val, k8sVersion)
MissedTemplate[plugin] = val
} else {
MissedTemplate[plugin] = []string{k8sVersion}
}
continue
}
TemplateData[k8sVersion][plugin] = fmt.Sprintf("range=%s key=%s", matchedRange, matchedKey)
}
}
}
func GenerateData() {
if len(os.Args) == 2 {
splitStr := strings.SplitN(os.Args[1], "=", 2)
if len(splitStr) == 2 {
if splitStr[0] == "--write-data" && splitStr[1] == "true" {
buf := new(bytes.Buffer)
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
enc.SetIndent("", " ")
if err := enc.Encode(TemplateData); err != nil {
panic(fmt.Sprintf("error encoding template data %v", err))
}
fmt.Println(buf.String())
if len(MissedTemplate) != 0 {
logrus.Warnf("found k8s versions without a template")
for plugin, data := range MissedTemplate {
logrus.Warnf("no %s template for k8sVersions %v \n", plugin, data)
}
}
fmt.Println("generating data.json")
//todo: zip file
strData, _ := json.MarshalIndent(DriverData, "", " ")
jsonFile, err := os.Create(rkeDataFilePath)
if err != nil {
panic(fmt.Errorf("err creating data file %v", err))
}
defer jsonFile.Close()
_, err = jsonFile.Write(strData)
if err != nil {
panic(fmt.Errorf("err writing jsonFile %v", err))
}
}
}
}
}


@ -1,16 +0,0 @@
package rke
func loadK8sVersionDockerInfo() map[string][]string {
return map[string][]string{
"1.8": {"1.11.x", "1.12.x", "1.13.x", "17.03.x"},
"1.9": {"1.11.x", "1.12.x", "1.13.x", "17.03.x", "18.06.x", "18.09.x", "19.03.x"},
"1.10": {"1.11.x", "1.12.x", "1.13.x", "17.03.x", "18.06.x", "18.09.x", "19.03.x"},
"1.11": {"1.11.x", "1.12.x", "1.13.x", "17.03.x", "18.06.x", "18.09.x", "19.03.x"},
"1.12": {"1.11.x", "1.12.x", "1.13.x", "17.03.x", "17.06.x", "17.09.x", "18.06.x", "18.09.x", "19.03.x"},
"1.13": {"1.11.x", "1.12.x", "1.13.x", "17.03.x", "17.06.x", "17.09.x", "18.06.x", "18.09.x", "19.03.x"},
"1.14": {"1.13.x", "17.03.x", "17.06.x", "17.09.x", "18.06.x", "18.09.x", "19.03.x"},
"1.15": {"1.13.x", "17.03.x", "17.06.x", "17.09.x", "18.06.x", "18.09.x", "19.03.x"},
"1.16": {"1.13.x", "17.03.x", "17.06.x", "17.09.x", "18.06.x", "18.09.x", "19.03.x"},
"1.17": {"1.13.x", "17.03.x", "17.06.x", "17.09.x", "18.06.x", "18.09.x", "19.03.x"},
}
}

File diff suppressed because it is too large


@ -1,281 +0,0 @@
package rke
import (
"fmt"
v3 "github.com/rancher/types/apis/management.cattle.io/v3"
)
const (
tlsCipherSuites = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
enableAdmissionPlugins = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction"
)
func loadK8sVersionServiceOptions() map[string]v3.KubernetesServicesOptions {
return map[string]v3.KubernetesServicesOptions{
"v1.17": {
Etcd: getETCDOptions117(),
KubeAPI: getKubeAPIOptions116(),
Kubelet: getKubeletOptions116(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.16.3-rancher1-1": {
Etcd: getETCDOptions(),
KubeAPI: getKubeAPIOptions116(),
Kubelet: getKubeletOptions116(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.16.4-rancher1-1": {
Etcd: getETCDOptions(),
KubeAPI: getKubeAPIOptions116(),
Kubelet: getKubeletOptions116(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.16.6-rancher1-1": {
Etcd: getETCDOptions(),
KubeAPI: getKubeAPIOptions116(),
Kubelet: getKubeletOptions116(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.16.6-rancher1-2": {
Etcd: getETCDOptions(),
KubeAPI: getKubeAPIOptions116(),
Kubelet: getKubeletOptions116(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.16": {
KubeAPI: getKubeAPIOptions116(),
Kubelet: getKubeletOptions116(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.15.6-rancher1-2": {
Etcd: getETCDOptions(),
KubeAPI: getKubeAPIOptions115(),
Kubelet: getKubeletOptions115(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.15.7-rancher1-1": {
Etcd: getETCDOptions(),
KubeAPI: getKubeAPIOptions115(),
Kubelet: getKubeletOptions115(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.15.9-rancher1-1": {
Etcd: getETCDOptions(),
KubeAPI: getKubeAPIOptions115(),
Kubelet: getKubeletOptions115(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.15": {
KubeAPI: getKubeAPIOptions115(),
Kubelet: getKubeletOptions115(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.14.9-rancher1-1": {
Etcd: getETCDOptions(),
KubeAPI: getKubeAPIOptions114(),
Kubelet: getKubeletOptions(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.14": {
KubeAPI: getKubeAPIOptions114(),
Kubelet: getKubeletOptions(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.13": {
KubeAPI: getKubeAPIOptions(),
Kubelet: getKubeletOptions(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.12": {
KubeAPI: getKubeAPIOptions(),
Kubelet: getKubeletOptions(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.11": {
KubeAPI: getKubeAPIOptions(),
Kubelet: getKubeletOptions(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.10": {
KubeAPI: getKubeAPIOptions(),
Kubelet: getKubeletOptions(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
"v1.9": {
KubeAPI: getKubeAPIOptions19(),
Kubelet: getKubeletOptions(),
KubeController: getKubeControllerOptions(),
Kubeproxy: getKubeProxyOptions(),
Scheduler: getSchedulerOptions(),
},
}
}
func getKubeAPIOptions() map[string]string {
data := map[string]string{
"tls-cipher-suites": tlsCipherSuites,
"enable-admission-plugins": enableAdmissionPlugins, // order doesn't matter >= 1.10
"allow-privileged": "true",
"anonymous-auth": "false",
"bind-address": "0.0.0.0",
"insecure-port": "0",
"kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
"profiling": "false",
"requestheader-extra-headers-prefix": "X-Remote-Extra-",
"requestheader-group-headers": "X-Remote-Group",
"requestheader-username-headers": "X-Remote-User",
"secure-port": "6443",
"service-account-lookup": "true",
"storage-backend": "etcd3",
"runtime-config": "authorization.k8s.io/v1beta1=true",
}
return data
}
func getKubeAPIOptions19() map[string]string {
kubeAPIOptions := getKubeAPIOptions()
kubeAPIOptions["admission-control"] = "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,NodeRestriction"
return kubeAPIOptions
}
func getKubeAPIOptions114() map[string]string {
kubeAPIOptions := getKubeAPIOptions()
kubeAPIOptions["enable-admission-plugins"] = fmt.Sprintf("%s,%s", enableAdmissionPlugins, "Priority")
kubeAPIOptions["runtime-config"] = "authorization.k8s.io/v1beta1=true"
return kubeAPIOptions
}
func getKubeAPIOptions115() map[string]string {
kubeAPIOptions := getKubeAPIOptions114()
kubeAPIOptions["enable-admission-plugins"] = fmt.Sprintf("%s,%s", kubeAPIOptions["enable-admission-plugins"], "TaintNodesByCondition,PersistentVolumeClaimResize")
kubeAPIOptions["runtime-config"] = "authorization.k8s.io/v1beta1=true"
return kubeAPIOptions
}
func getKubeAPIOptions116() map[string]string {
kubeAPIOptions := getKubeAPIOptions114()
kubeAPIOptions["enable-admission-plugins"] = fmt.Sprintf("%s,%s", kubeAPIOptions["enable-admission-plugins"], "TaintNodesByCondition,PersistentVolumeClaimResize")
kubeAPIOptions["runtime-config"] = "authorization.k8s.io/v1beta1=true"
return kubeAPIOptions
}
// getKubeletOptions provides the root options for windows
// note: please double-check on windows side if changing the following options
func getKubeletOptions() map[string]string {
return map[string]string{
"tls-cipher-suites": tlsCipherSuites,
"address": "0.0.0.0",
"allow-privileged": "true",
"anonymous-auth": "false",
"authentication-token-webhook": "true",
"cgroups-per-qos": "True",
"cni-bin-dir": "/opt/cni/bin",
"cni-conf-dir": "/etc/cni/net.d",
"enforce-node-allocatable": "",
"event-qps": "0",
"make-iptables-util-chains": "true",
"network-plugin": "cni",
"read-only-port": "0",
"resolv-conf": "/etc/resolv.conf",
"streaming-connection-idle-timeout": "30m",
"volume-plugin-dir": "/var/lib/kubelet/volumeplugins",
"v": "2",
"authorization-mode": "Webhook",
}
}
func getKubeletOptions115() map[string]string {
kubeletOptions := getKubeletOptions()
kubeletOptions["authorization-mode"] = "Webhook"
delete(kubeletOptions, "allow-privileged")
return kubeletOptions
}
func getKubeletOptions116() map[string]string {
kubeletOptions := getKubeletOptions()
kubeletOptions["authorization-mode"] = "Webhook"
delete(kubeletOptions, "allow-privileged")
return kubeletOptions
}
func getKubeControllerOptions() map[string]string {
return map[string]string{
"address": "0.0.0.0",
"allow-untagged-cloud": "true",
"allocate-node-cidrs": "true",
"configure-cloud-routes": "false",
"enable-hostpath-provisioner": "false",
"leader-elect": "true",
"node-monitor-grace-period": "40s",
"pod-eviction-timeout": "5m0s",
"profiling": "false",
"terminated-pod-gc-threshold": "1000",
"v": "2",
}
}
// getKubeProxyOptions provides the root options for windows
// note: please double-check on windows side if changing the following options
func getKubeProxyOptions() map[string]string {
return map[string]string{
"v": "2",
"healthz-bind-address": "127.0.0.1",
}
}
func getSchedulerOptions() map[string]string {
return map[string]string{
"leader-elect": "true",
"v": "2",
"address": "0.0.0.0",
"profiling": "false",
}
}
func getETCDOptions() map[string]string {
return map[string]string{
"client-cert-auth": "true",
"peer-client-cert-auth": "true",
}
}
func getETCDOptions117() map[string]string {
return map[string]string{
"client-cert-auth": "true",
"peer-client-cert-auth": "true",
"enable-v2": "true",
}
}


@ -1,87 +0,0 @@
package rke
import v3 "github.com/rancher/types/apis/management.cattle.io/v3"
func loadRancherDefaultK8sVersions() map[string]string {
/*
Just mention the major version, the latest minor version will be
automatically picked based on Rancher's max/min version information.
*/
return map[string]string{
"2.3.0": "v1.15.x",
"2.3.1": "v1.15.x",
"2.3.2": "v1.15.x",
"2.3.3": "v1.16.x",
"2.3": "v1.17.x",
// rancher will use default if its version is absent
"default": "v1.17.x",
}
}
func loadRKEDefaultK8sVersions() map[string]string {
return map[string]string{
"0.3": "v1.16.3-rancher1-1",
// rke will use default if its version is absent
"default": "v1.17.2-rancher1-2",
}
}
/*
MaxRancherVersion: Last Rancher version having this k8s in k8sVersionsCurrent
DeprecateRancherVersion: No create/update allowed for RKE >= DeprecateRancherVersion
MaxRKEVersion: Last RKE version having this k8s in k8sVersionsCurrent
DeprecateRKEVersion: No create/update allowed for RKE >= DeprecateRKEVersion
*/
func loadK8sVersionInfo() map[string]v3.K8sVersionInfo {
return map[string]v3.K8sVersionInfo{
"v1.8": {
MaxRancherVersion: "2.2",
MaxRKEVersion: "0.2.2",
},
"v1.9": {
MaxRancherVersion: "2.2",
MaxRKEVersion: "0.2.2",
},
"v1.10": {
MaxRancherVersion: "2.2",
MaxRKEVersion: "0.2.2",
},
"v1.11": {
MaxRancherVersion: "2.2",
MaxRKEVersion: "0.2.2",
},
"v1.12": {
MaxRancherVersion: "2.2",
MaxRKEVersion: "0.2.2",
},
"v1.13": {
MaxRancherVersion: "2.3.1",
MaxRKEVersion: "0.3.1",
},
"v1.14": {
MaxRancherVersion: "2.3.3",
MaxRKEVersion: "1.0.0",
},
"v1.15.5-rancher1-1": {
MaxRancherVersion: "2.2.9",
MaxRKEVersion: "0.2.8",
},
"v1.8.10-rancher1-1": {
DeprecateRKEVersion: "0.2.2",
DeprecateRancherVersion: "2.2",
},
"v1.8.11-rancher1": {
DeprecateRKEVersion: "0.2.2",
DeprecateRancherVersion: "2.2",
},
"v1.9.7-rancher1": {
DeprecateRKEVersion: "0.2.2",
DeprecateRancherVersion: "2.2",
},
"v1.10.1-rancher1": {
DeprecateRKEVersion: "0.2.2",
DeprecateRancherVersion: "2.2",
},
}
}


@ -1,77 +0,0 @@
package rke
import v3 "github.com/rancher/types/apis/management.cattle.io/v3"
func loadK8sVersionWindowsServiceOptions() map[string]v3.KubernetesServicesOptions {
// since 1.14, windows has been supported
return map[string]v3.KubernetesServicesOptions{
"v1.17": {
Kubelet: getWindowsKubeletOptions116(),
Kubeproxy: getWindowsKubeProxyOptions(),
},
"v1.16": {
Kubelet: getWindowsKubeletOptions116(),
Kubeproxy: getWindowsKubeProxyOptions(),
},
"v1.15": {
Kubelet: getWindowsKubeletOptions115(),
Kubeproxy: getWindowsKubeProxyOptions(),
},
}
}
func getWindowsKubeletOptions() map[string]string {
kubeletOptions := getKubeletOptions()
// doesn't support cgroups
kubeletOptions["cgroups-per-qos"] = "false"
kubeletOptions["enforce-node-allocatable"] = "''"
// doesn't support dns
kubeletOptions["resolv-conf"] = "''"
// add prefix path for directory options
kubeletOptions["cni-bin-dir"] = "[PREFIX_PATH]/opt/cni/bin"
kubeletOptions["cni-conf-dir"] = "[PREFIX_PATH]/etc/cni/net.d"
kubeletOptions["cert-dir"] = "[PREFIX_PATH]/var/lib/kubelet/pki"
kubeletOptions["volume-plugin-dir"] = "[PREFIX_PATH]/var/lib/kubelet/volumeplugins"
// add reservation for kubernetes components
kubeletOptions["kube-reserved"] = "cpu=500m,memory=500Mi,ephemeral-storage=1Gi"
// add reservation for system
kubeletOptions["system-reserved"] = "cpu=1000m,memory=2Gi,ephemeral-storage=2Gi"
// increase image pulling deadline
kubeletOptions["image-pull-progress-deadline"] = "30m"
// enable some windows features
kubeletOptions["feature-gates"] = "HyperVContainer=true,WindowsGMSA=true"
return kubeletOptions
}
func getWindowsKubeletOptions115() map[string]string {
kubeletOptions := getWindowsKubeletOptions()
// doesn't support `allow-privileged`
delete(kubeletOptions, "allow-privileged")
return kubeletOptions
}
func getWindowsKubeletOptions116() map[string]string {
kubeletOptions := getWindowsKubeletOptions()
// doesn't support `allow-privileged`
delete(kubeletOptions, "allow-privileged")
return kubeletOptions
}
func getWindowsKubeProxyOptions() map[string]string {
kubeProxyOptions := getKubeProxyOptions()
// use kernelspace proxy mode
kubeProxyOptions["proxy-mode"] = "kernelspace"
// enable Windows Overlay support
kubeProxyOptions["feature-gates"] = "WinOverlay=true"
// disable Windows DSR support explicitly
kubeProxyOptions["enable-dsr"] = "false"
return kubeProxyOptions
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,942 +0,0 @@
package templates
const CoreDNSTemplate = `
---
{{- if eq .RBACConfig "rbac"}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
{{- end }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
.:53 {
errors
health
kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ "in-addr.arpa ip6.arpa" }}{{ end }} {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
{{- if .UpstreamNameservers }}
forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}
{{- else }}
forward . "/etc/resolv.conf"
{{- end }}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
strategy:
{{if .UpdateStrategy}}
{{ toYaml .UpdateStrategy | indent 4}}
{{else}}
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
{{end}}
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
{{- if eq .RBACConfig "rbac"}}
serviceAccountName: coredns
{{- end }}
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
nodeSelector:
beta.kubernetes.io/os: linux
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/worker
operator: Exists
containers:
- name: coredns
image: {{.CoreDNSImage}}
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{.ClusterDNSServer}}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns-autoscaler
namespace: kube-system
labels:
k8s-app: coredns-autoscaler
spec:
selector:
matchLabels:
k8s-app: coredns-autoscaler
template:
metadata:
labels:
k8s-app: coredns-autoscaler
spec:
{{- if eq .RBACConfig "rbac"}}
serviceAccountName: coredns-autoscaler
{{- end }}
nodeSelector:
beta.kubernetes.io/os: linux
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/worker
operator: Exists
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
containers:
- name: autoscaler
image: {{.CoreDNSAutoScalerImage}}
resources:
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
{{if .LinearAutoscalerParams}}
- --default-params={"linear":{{.LinearAutoscalerParams}}}
{{else}}
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}}
{{end}}
- --logtostderr=true
- --v=2
{{- if eq .RBACConfig "rbac"}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns-autoscaler
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:coredns-autoscaler
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list", "watch"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:coredns-autoscaler
apiGroup: rbac.authorization.k8s.io
{{- end }}`
const CoreDNSTemplateV116 = `
---
{{- if eq .RBACConfig "rbac"}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
{{- end }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health
ready
kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ "in-addr.arpa ip6.arpa" }}{{ end }} {
pods insecure
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
{{- if .UpstreamNameservers }}
forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}
{{- else }}
forward . "/etc/resolv.conf"
{{- end }}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
strategy:
{{if .UpdateStrategy}}
{{ toYaml .UpdateStrategy | indent 4}}
{{else}}
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
{{end}}
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
{{- if eq .RBACConfig "rbac"}}
serviceAccountName: coredns
{{- end }}
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
nodeSelector:
beta.kubernetes.io/os: linux
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/worker
operator: Exists
containers:
- name: coredns
image: {{.CoreDNSImage}}
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{.ClusterDNSServer}}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns-autoscaler
namespace: kube-system
labels:
k8s-app: coredns-autoscaler
spec:
selector:
matchLabels:
k8s-app: coredns-autoscaler
template:
metadata:
labels:
k8s-app: coredns-autoscaler
spec:
{{- if eq .RBACConfig "rbac"}}
serviceAccountName: coredns-autoscaler
{{- end }}
nodeSelector:
beta.kubernetes.io/os: linux
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/worker
operator: Exists
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
containers:
- name: autoscaler
image: {{.CoreDNSAutoScalerImage}}
resources:
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
          # When the cluster is using large nodes (with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
{{if .LinearAutoscalerParams}}
- --default-params={"linear":{{.LinearAutoscalerParams}}}
{{else}}
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1,"preventSinglePointFailure":true}}
{{end}}
- --logtostderr=true
- --v=2
{{- if eq .RBACConfig "rbac"}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns-autoscaler
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:coredns-autoscaler
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list", "watch"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions","apps"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:coredns-autoscaler
apiGroup: rbac.authorization.k8s.io
{{- end }}`
const CoreDNSTemplateV117 = `
---
{{- if eq .RBACConfig "rbac"}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
{{- end }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ "in-addr.arpa ip6.arpa" }}{{ end }} {
pods insecure
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
{{- if .UpstreamNameservers }}
forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}
{{- else }}
forward . "/etc/resolv.conf"
{{- end }}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
strategy:
{{if .UpdateStrategy}}
{{ toYaml .UpdateStrategy | indent 4}}
{{else}}
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
{{end}}
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
{{- if eq .RBACConfig "rbac"}}
serviceAccountName: coredns
{{- end }}
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
nodeSelector:
beta.kubernetes.io/os: linux
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/worker
operator: Exists
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: {{.CoreDNSImage}}
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{.ClusterDNSServer}}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns-autoscaler
namespace: kube-system
labels:
k8s-app: coredns-autoscaler
spec:
selector:
matchLabels:
k8s-app: coredns-autoscaler
template:
metadata:
labels:
k8s-app: coredns-autoscaler
spec:
{{- if eq .RBACConfig "rbac"}}
serviceAccountName: coredns-autoscaler
{{- end }}
nodeSelector:
beta.kubernetes.io/os: linux
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/worker
operator: Exists
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
containers:
- name: autoscaler
image: {{.CoreDNSAutoScalerImage}}
resources:
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
          # When the cluster is using large nodes (with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
{{if .LinearAutoscalerParams}}
- --default-params={"linear":{{.LinearAutoscalerParams}}}
{{else}}
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1,"preventSinglePointFailure":true}}
{{end}}
- --nodelabels=node-role.kubernetes.io/worker=true,beta.kubernetes.io/os=linux
- --logtostderr=true
- --v=2
{{- if eq .RBACConfig "rbac"}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns-autoscaler
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:coredns-autoscaler
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list", "watch"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions","apps"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:coredns-autoscaler
apiGroup: rbac.authorization.k8s.io
{{- end }}`

@ -1,718 +0,0 @@
package templates
const FlannelTemplate = `
{{- if eq .RBACConfig "rbac"}}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
{{- end}}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: "kube-system"
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name":"cbr0",
"cniVersion":"0.3.1",
"plugins":[
{
"type":"flannel",
"delegate":{
"forceAddress":true,
"isDefaultGateway":true
}
},
{
"type":"portmap",
"capabilities":{
"portMappings":true
}
}
]
}
net-conf.json: |
{
"Network": "{{.ClusterCIDR}}",
"Backend": {
"Type": "{{.FlannelBackend.Type}}",
"VNI": {{.FlannelBackend.VNI}},
"Port": {{.FlannelBackend.Port}}
}
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-flannel
namespace: "kube-system"
labels:
tier: node
k8s-app: flannel
spec:
template:
metadata:
labels:
tier: node
k8s-app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
{{if .NodeSelector}}
nodeSelector:
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
{{end}}
serviceAccountName: flannel
containers:
- name: kube-flannel
image: {{.Image}}
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 300m
memory: 500M
requests:
cpu: 150m
memory: 64M
{{- if .FlannelInterface}}
command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr","--iface={{.FlannelInterface}}"]
{{- else}}
command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr"]
{{- end}}
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: {{.CNIImage}}
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kube-flannel-cfg
key: cni-conf.json
- name: CNI_CONF_NAME
value: "10-flannel.conflist"
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: host-cni-bin
mountPath: /host/opt/cni/bin/
hostNetwork: true
tolerations:
{{- if ge .ClusterVersion "v1.12" }}
- operator: Exists
effect: NoSchedule
- operator: Exists
effect: NoExecute
{{- else }}
- key: node-role.kubernetes.io/controlplane
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/etcd
operator: Exists
effect: NoExecute
{{- end }}
- key: node.kubernetes.io/not-ready
effect: NoSchedule
operator: Exists
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: host-cni-bin
hostPath:
path: /opt/cni/bin
updateStrategy:
{{if .UpdateStrategy}}
{{ toYaml .UpdateStrategy | indent 4}}
{{else}}
type: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
{{end}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
`
const FlannelTemplateV115 = `
{{- if eq .RBACConfig "rbac"}}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
{{end}}
---
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
    # SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion":"0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "{{.ClusterCIDR}}",
"Backend": {
"Type": "{{.FlannelBackend.Type}}",
"VNI": {{.FlannelBackend.VNI}},
"Port": {{.FlannelBackend.Port}}
}
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-flannel
namespace: kube-system
labels:
tier: node
k8s-app: flannel
spec:
template:
metadata:
labels:
tier: node
k8s-app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
hostNetwork: true
{{if .NodeSelector}}
nodeSelector:
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
{{end}}
tolerations:
- operator: Exists
{{- if eq .RBACConfig "rbac"}}
serviceAccountName: flannel
{{end}}
containers:
- name: kube-flannel
image: {{.Image}}
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
{{- if .FlannelInterface}}
- --iface={{.FlannelInterface}}
{{end}}
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: {{.CNIImage}}
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kube-flannel-cfg
key: cni-conf.json
- name: CNI_CONF_NAME
value: "10-flannel.conflist"
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: host-cni-bin
mountPath: /host/opt/cni/bin/
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: host-cni-bin
hostPath:
path: /opt/cni/bin
updateStrategy:
{{if .UpdateStrategy}}
{{ toYaml .UpdateStrategy | indent 4}}
{{else}}
type: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
{{end}}
`
const FlannelTemplateV116 = `
{{- if eq .RBACConfig "rbac"}}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
{{end}}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
    # SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion":"0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "{{.ClusterCIDR}}",
"Backend": {
"Type": "{{.FlannelBackend.Type}}",
"VNI": {{.FlannelBackend.VNI}},
"Port": {{.FlannelBackend.Port}}
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel
namespace: kube-system
labels:
tier: node
k8s-app: flannel
spec:
selector:
matchLabels:
k8s-app: flannel
template:
metadata:
labels:
tier: node
k8s-app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
hostNetwork: true
{{if .NodeSelector}}
nodeSelector:
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
{{end}}
tolerations:
- operator: Exists
{{- if eq .RBACConfig "rbac"}}
serviceAccountName: flannel
{{end}}
containers:
- name: kube-flannel
image: {{.Image}}
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
{{- if .FlannelInterface}}
- --iface={{.FlannelInterface}}
{{end}}
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: {{.CNIImage}}
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kube-flannel-cfg
key: cni-conf.json
- name: CNI_CONF_NAME
value: "10-flannel.conflist"
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: host-cni-bin
mountPath: /host/opt/cni/bin/
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: host-cni-bin
hostPath:
path: /opt/cni/bin
updateStrategy:
{{if .UpdateStrategy}}
{{ toYaml .UpdateStrategy | indent 4}}
{{else}}
type: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
{{end}}
`

@ -1,676 +0,0 @@
package templates
const KubeDNSTemplate = `
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: kube-dns-autoscaler
namespace: kube-system
labels:
k8s-app: kube-dns-autoscaler
spec:
template:
metadata:
labels:
k8s-app: kube-dns-autoscaler
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
- key: node-role.kubernetes.io/worker
operator: Exists
serviceAccountName: kube-dns-autoscaler
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
containers:
- name: autoscaler
image: {{.KubeDNSAutoScalerImage}}
resources:
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=kube-dns-autoscaler
- --target=Deployment/kube-dns
          # When the cluster is using large nodes (with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
{{if .LinearAutoscalerParams}}
- --default-params={"linear":{{.LinearAutoscalerParams}}}
{{else}}
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}}
{{end}}
- --logtostderr=true
- --v=2
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns-autoscaler
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
{{- if eq .RBACConfig "rbac"}}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-dns-autoscaler
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list", "watch"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-dns-autoscaler
subjects:
- kind: ServiceAccount
name: kube-dns-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:kube-dns-autoscaler
apiGroup: rbac.authorization.k8s.io
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
# replicas: not specified here:
  # 1. In order to make the Addon Manager not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
{{if .UpdateStrategy}}
{{ toYaml .UpdateStrategy | indent 4}}
{{else}}
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
{{end}}
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
{{if .NodeSelector}}
nodeSelector:
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
{{end}}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
- key: node-role.kubernetes.io/worker
operator: Exists
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: {{.KubeDNSImage}}
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain={{.ClusterDomain}}.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: {{.DNSMasqImage}}
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --log-facility=-
- --server=/{{.ClusterDomain}}/127.0.0.1#10053
{{- if .ReverseCIDRs }}
{{- range .ReverseCIDRs }}
- --server=/{{.}}/127.0.0.1#10053
{{- end }}
{{- else }}
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
{{- end }}
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: {{.KubeDNSSidecarImage}}
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.ClusterDomain}},5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.ClusterDomain}},5,A
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{.ClusterDNSServer}}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-dns
namespace: kube-system
data:
{{- if .UpstreamNameservers }}
upstreamNameservers: |
[{{range $i, $v := .UpstreamNameservers}}{{if $i}}, {{end}}{{printf "%q" .}}{{end}}]
{{- end }}
{{- if .StubDomains }}
stubDomains: |
{{ GetKubednsStubDomains .StubDomains }}
{{- end }}`
const KubeDNSTemplateV116 = `
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-dns-autoscaler
namespace: kube-system
labels:
k8s-app: kube-dns-autoscaler
spec:
selector:
matchLabels:
k8s-app: kube-dns-autoscaler
template:
metadata:
labels:
k8s-app: kube-dns-autoscaler
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
- key: node-role.kubernetes.io/worker
operator: Exists
serviceAccountName: kube-dns-autoscaler
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
containers:
- name: autoscaler
image: {{.KubeDNSAutoScalerImage}}
resources:
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=kube-dns-autoscaler
- --target=Deployment/kube-dns
          # When the cluster is using large nodes (with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
{{if .LinearAutoscalerParams}}
- --default-params={"linear":{{.LinearAutoscalerParams}}}
{{else}}
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1,"preventSinglePointFailure":true}}
{{end}}
- --logtostderr=true
- --v=2
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns-autoscaler
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
{{- if eq .RBACConfig "rbac"}}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-dns-autoscaler
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list", "watch"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions","apps"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-dns-autoscaler
subjects:
- kind: ServiceAccount
name: kube-dns-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:kube-dns-autoscaler
apiGroup: rbac.authorization.k8s.io
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
# replicas: not specified here:
  # 1. In order to make the Addon Manager not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
{{if .UpdateStrategy}}
{{ toYaml .UpdateStrategy | indent 4}}
{{else}}
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
{{end}}
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
{{if .NodeSelector}}
nodeSelector:
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
{{end}}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
- key: node-role.kubernetes.io/worker
operator: Exists
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: {{.KubeDNSImage}}
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain={{.ClusterDomain}}.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: {{.DNSMasqImage}}
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --log-facility=-
- --server=/{{.ClusterDomain}}/127.0.0.1#10053
{{- if .ReverseCIDRs }}
{{- range .ReverseCIDRs }}
- --server=/{{.}}/127.0.0.1#10053
{{- end }}
{{- else }}
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
{{- end }}
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: {{.KubeDNSSidecarImage}}
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.ClusterDomain}},5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.ClusterDomain}},5,A
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{.ClusterDNSServer}}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-dns
namespace: kube-system
data:
{{- if .UpstreamNameservers }}
upstreamNameservers: |
[{{range $i, $v := .UpstreamNameservers}}{{if $i}}, {{end}}{{printf "%q" .}}{{end}}]
{{- end }}
{{- if .StubDomains }}
stubDomains: |
{{ GetKubednsStubDomains .StubDomains }}
{{- end }}`

@ -1,170 +0,0 @@
package templates
const MetricsServerTemplate = `
{{- if eq .RBACConfig "rbac"}}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
resources:
- deployments
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
{{- end }}
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
name: v1beta1.metrics.k8s.io
spec:
service:
name: metrics-server
namespace: kube-system
group: metrics.k8s.io
version: v1beta1
insecureSkipTLSVerify: true
groupPriorityMinimum: 100
versionPriority: 100
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: metrics-server
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: metrics-server
namespace: kube-system
labels:
k8s-app: metrics-server
spec:
{{if .Replicas}}
replicas: {{.Replicas}}
{{end}}
selector:
matchLabels:
k8s-app: metrics-server
{{if .UpdateStrategy}}
strategy:
{{ toYaml .UpdateStrategy | indent 4}}
{{end}}
template:
metadata:
name: metrics-server
labels:
k8s-app: metrics-server
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
- key: node-role.kubernetes.io/worker
operator: Exists
{{if .NodeSelector}}
nodeSelector:
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
{{end}}
serviceAccountName: metrics-server
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
containers:
- name: metrics-server
image: {{ .MetricsServerImage }}
imagePullPolicy: Always
command:
- /metrics-server
{{- if eq .Version "v0.3" }}
- --kubelet-insecure-tls
- --kubelet-preferred-address-types=InternalIP
- --logtostderr
{{- else }}
- --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true&kubeletPort=10250&useServiceAccount=true&insecure=true
{{- end }}
{{ range $k,$v := .Options }}
- --{{ $k }}={{ $v }}
{{ end }}
---
apiVersion: v1
kind: Service
metadata:
name: metrics-server
namespace: kube-system
labels:
kubernetes.io/name: "Metrics-server"
spec:
selector:
k8s-app: metrics-server
ports:
- port: 443
protocol: TCP
targetPort: 443
`

@ -1,725 +0,0 @@
package templates
const NginxIngressTemplate = `
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-configuration
namespace: ingress-nginx
labels:
app: ingress-nginx
data:
{{ range $k,$v := .Options }}
{{ $k }}: "{{ $v }}"
{{ end }}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tcp-services
namespace: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: udp-services
namespace: ingress-nginx
{{if eq .RBACConfig "rbac"}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: nginx-ingress-clusterrole
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
resources:
- ingresses
- daemonsets
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- "extensions"
resources:
- ingresses/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: nginx-ingress-role
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
# Defaults to "<election-id>-<ingress-class>"
    # Here: "ingress-controller-leader-nginx"
# This has to be adapted if you change either parameter
# when launching the nginx-ingress-controller.
- "ingress-controller-leader-nginx"
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: nginx-ingress-role-nisa-binding
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-ingress-role
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: nginx-ingress-clusterrole-nisa-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-ingress-clusterrole
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
{{ end }}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nginx-ingress-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app: ingress-nginx
{{if .UpdateStrategy}}
updateStrategy:
{{ toYaml .UpdateStrategy | indent 4}}
{{end}}
template:
metadata:
labels:
app: ingress-nginx
annotations:
prometheus.io/port: '10254'
prometheus.io/scrape: 'true'
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
- key: node-role.kubernetes.io/worker
operator: Exists
hostNetwork: true
{{if .DNSPolicy}}
dnsPolicy: {{.DNSPolicy}}
{{end}}
{{if .NodeSelector}}
nodeSelector:
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
{{end}}
{{if eq .RBACConfig "rbac"}}
serviceAccountName: nginx-ingress-serviceaccount
{{ end }}
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
{{- if ne .AlpineImage ""}}
initContainers:
- command:
- sh
- -c
- sysctl -w net.core.somaxconn=32768; sysctl -w net.ipv4.ip_local_port_range="1024 65535"
image: {{.AlpineImage}}
imagePullPolicy: IfNotPresent
name: sysctl
securityContext:
privileged: true
{{- end }}
containers:
- name: nginx-ingress-controller
image: {{.IngressImage}}
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/nginx-configuration
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
- --annotations-prefix=nginx.ingress.kubernetes.io
{{ range $k, $v := .ExtraArgs }}
- --{{ $k }}{{if ne $v "" }}={{ $v }}{{end}}
{{ end }}
{{- if eq .AlpineImage ""}}
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 33
{{- end }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{if .ExtraEnvs}}
{{ toYaml .ExtraEnvs | indent 12}}
{{end}}
ports:
- name: http
containerPort: 80
- name: https
containerPort: 443
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
{{if .ExtraVolumeMounts}}
volumeMounts:
{{ toYaml .ExtraVolumeMounts | indent 12}}
{{end}}
{{if .ExtraVolumes}}
volumes:
{{ toYaml .ExtraVolumes | indent 8}}
{{end}}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: default-http-backend
labels:
app: default-http-backend
namespace: ingress-nginx
spec:
replicas: 1
selector:
matchLabels:
app: default-http-backend
template:
metadata:
labels:
app: default-http-backend
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
- key: node-role.kubernetes.io/worker
operator: Exists
terminationGracePeriodSeconds: 60
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
containers:
- name: default-http-backend
        # Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: {{.IngressBackend}}
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: ingress-nginx
labels:
app: default-http-backend
spec:
ports:
- port: 80
targetPort: 8080
selector:
app: default-http-backend
`
const NginxIngressTemplateV0251Rancher1 = `
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-configuration
namespace: ingress-nginx
labels:
app: ingress-nginx
data:
{{ range $k,$v := .Options }}
{{ $k }}: "{{ $v }}"
{{ end }}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tcp-services
namespace: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: udp-services
namespace: ingress-nginx
{{if eq .RBACConfig "rbac"}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: nginx-ingress-clusterrole
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
- "networking.k8s.io"
resources:
- ingresses
- daemonsets
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- "extensions"
- "networking.k8s.io"
resources:
- ingresses/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: nginx-ingress-role
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
# Defaults to "<election-id>-<ingress-class>"
    # Here: "ingress-controller-leader-nginx"
# This has to be adapted if you change either parameter
# when launching the nginx-ingress-controller.
- "ingress-controller-leader-nginx"
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: nginx-ingress-role-nisa-binding
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-ingress-role
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: nginx-ingress-clusterrole-nisa-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-ingress-clusterrole
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
{{ end }}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nginx-ingress-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app: ingress-nginx
{{if .UpdateStrategy}}
updateStrategy:
{{ toYaml .UpdateStrategy | indent 4}}
{{end}}
template:
metadata:
labels:
app: ingress-nginx
annotations:
prometheus.io/port: '10254'
prometheus.io/scrape: 'true'
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
- key: node-role.kubernetes.io/worker
operator: Exists
hostNetwork: true
{{if .DNSPolicy}}
dnsPolicy: {{.DNSPolicy}}
{{end}}
{{if .NodeSelector}}
nodeSelector:
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
{{end}}
{{if eq .RBACConfig "rbac"}}
serviceAccountName: nginx-ingress-serviceaccount
{{ end }}
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
{{- if ne .AlpineImage ""}}
initContainers:
- command:
- sh
- -c
- sysctl -w net.core.somaxconn=32768; sysctl -w net.ipv4.ip_local_port_range="1024 65535"
image: {{.AlpineImage}}
imagePullPolicy: IfNotPresent
name: sysctl
securityContext:
privileged: true
{{- end }}
containers:
- name: nginx-ingress-controller
image: {{.IngressImage}}
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/nginx-configuration
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
- --annotations-prefix=nginx.ingress.kubernetes.io
{{ range $k, $v := .ExtraArgs }}
- --{{ $k }}{{if ne $v "" }}={{ $v }}{{end}}
{{ end }}
{{- if eq .AlpineImage ""}}
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 33
{{- end }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{if .ExtraEnvs}}
{{ toYaml .ExtraEnvs | indent 12}}
{{end}}
ports:
- name: http
containerPort: 80
- name: https
containerPort: 443
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
{{if .ExtraVolumeMounts}}
volumeMounts:
{{ toYaml .ExtraVolumeMounts | indent 12}}
{{end}}
{{if .ExtraVolumes}}
volumes:
{{ toYaml .ExtraVolumes | indent 8}}
{{end}}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: default-http-backend
labels:
app: default-http-backend
namespace: ingress-nginx
spec:
replicas: 1
selector:
matchLabels:
app: default-http-backend
template:
metadata:
labels:
app: default-http-backend
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
- key: node-role.kubernetes.io/worker
operator: Exists
terminationGracePeriodSeconds: 60
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
containers:
- name: default-http-backend
        # Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: {{.IngressBackend}}
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: ingress-nginx
labels:
app: default-http-backend
spec:
ports:
- port: 80
targetPort: 8080
selector:
app: default-http-backend
`

@ -1,134 +0,0 @@
package templates
/*
Not including the vSphere (cloud provider) and Authz templates.
Will they change and require Rancher to pass them to RKE
depending on the Kubernetes version?
*/
const (
Calico = "calico"
Canal = "canal"
Flannel = "flannel"
Weave = "weave"
CoreDNS = "coreDNS"
KubeDNS = "kubeDNS"
MetricsServer = "metricsServer"
NginxIngress = "nginxIngress"
TemplateKeys = "templateKeys"
calicov18 = "calico-v1.8"
calicov113 = "calico-v1.13"
calicov115 = "calico-v1.15"
calicov116 = "calico-v1.16"
calicov117 = "calico-v1.17"
canalv18 = "canal-v1.8"
canalv113 = "canal-v1.13"
canalv115 = "canal-v1.15"
canalv116 = "canal-v1.16"
canalv117 = "canal-v1.17"
flannelv18 = "flannel-v1.8"
flannelv115 = "flannel-v1.15"
flannelv116 = "flannel-v1.16"
coreDnsv18 = "coredns-v1.8"
coreDnsv116 = "coredns-v1.16"
coreDnsv117 = "coredns-v1.17"
kubeDnsv18 = "kubedns-v1.8"
kubeDnsv116 = "kubedns-v1.16"
metricsServerv18 = "metricsserver-v1.8"
weavev18 = "weave-v1.8"
weavev116 = "weave-v1.16"
nginxIngressv18 = "nginxingress-v1.8"
nginxIngressV115 = "nginxingress-v1.15"
)
func LoadK8sVersionedTemplates() map[string]map[string]string {
return map[string]map[string]string{
Calico: {
">=1.16.4-rancher1": calicov117,
">=1.16.0-alpha <1.16.4-rancher1": calicov116,
">=1.15.0-rancher0 <1.16.0-alpha": calicov115,
">=1.13.0-rancher0 <1.15.0-rancher0": calicov113,
">=1.8.0-rancher0 <1.13.0-rancher0": calicov18,
},
Canal: {
">=1.16.4-rancher1": canalv117,
">=1.16.0-alpha <1.16.4-rancher1": canalv116,
">=1.15.0-rancher0 <1.16.0-alpha": canalv115,
">=1.13.0-rancher0 <1.15.0-rancher0": canalv113,
">=1.8.0-rancher0 <1.13.0-rancher0": canalv18,
},
Flannel: {
">=1.16.0-alpha": flannelv116,
">=1.15.0-rancher0 <1.16.0-alpha": flannelv115,
">=1.8.0-rancher0 <1.15.0-rancher0": flannelv18,
},
CoreDNS: {
">=1.17.0-alpha": coreDnsv117,
">=1.16.0-alpha <1.17.0-alpha": coreDnsv116,
">=1.8.0-rancher0 <1.16.0-alpha": coreDnsv18,
},
KubeDNS: {
">=1.16.0-alpha": kubeDnsv116,
">=1.8.0-rancher0 <1.16.0-alpha": kubeDnsv18,
},
MetricsServer: {
">=1.8.0-rancher0": metricsServerv18,
},
Weave: {
">=1.16.0-alpha": weavev116,
">=1.8.0-rancher0 <1.16.0-alpha": weavev18,
},
NginxIngress: {
">=1.8.0-rancher0 <1.13.10-rancher1-3": nginxIngressv18,
">=1.13.10-rancher1-3 <1.14.0-rancher0": nginxIngressV115,
">=1.14.0-rancher0 <=1.14.6-rancher1-1": nginxIngressv18,
">=1.14.6-rancher2 <1.15.0-rancher0": nginxIngressV115,
">=1.15.0-rancher0 <=1.15.3-rancher1-1": nginxIngressv18,
">=1.15.3-rancher2": nginxIngressV115,
},
TemplateKeys: getTemplates(),
}
}
func getTemplates() map[string]string {
return map[string]string{
calicov113: CalicoTemplateV113,
calicov115: CalicoTemplateV115,
calicov116: CalicoTemplateV116,
calicov117: CalicoTemplateV117,
calicov18: CalicoTemplateV112,
flannelv115: FlannelTemplateV115,
flannelv116: FlannelTemplateV116,
flannelv18: FlannelTemplate,
canalv113: CanalTemplateV113,
canalv18: CanalTemplateV112,
canalv115: CanalTemplateV115,
canalv116: CanalTemplateV116,
canalv117: CanalTemplateV117,
coreDnsv18: CoreDNSTemplate,
coreDnsv116: CoreDNSTemplateV116,
coreDnsv117: CoreDNSTemplateV117,
kubeDnsv18: KubeDNSTemplate,
kubeDnsv116: KubeDNSTemplateV116,
metricsServerv18: MetricsServerTemplate,
weavev18: WeaveTemplate,
weavev116: WeaveTemplateV116,
nginxIngressv18: NginxIngressTemplate,
nginxIngressV115: NginxIngressTemplateV0251Rancher1,
}
}
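Taken together, the two functions in this deleted file form a two-level lookup: LoadK8sVersionedTemplates maps each addon name to semver ranges and template keys, and getTemplates maps those keys to template bodies. A minimal sketch of how such a table could be queried is shown below; it assumes the github.com/blang/semver package for range matching and uses stand-in data, since the resolution code RKE actually uses is not part of this commit.

// Hypothetical lookup against the maps defined above, assuming
// github.com/blang/semver; RKE's real resolution logic lives elsewhere.
package main

import (
    "fmt"

    "github.com/blang/semver"
)

// templateFor picks the template body for one addon and k8s version by
// testing the version against each semver-range key.
func templateFor(data map[string]map[string]string, addon, k8sVersion string) (string, bool) {
    v, err := semver.Parse(k8sVersion)
    if err != nil {
        return "", false
    }
    for rangeExpr, key := range data[addon] {
        r, err := semver.ParseRange(rangeExpr)
        if err != nil {
            continue
        }
        if r(v) {
            body, ok := data["templateKeys"][key]
            return body, ok
        }
    }
    return "", false
}

func main() {
    // Stand-in data; in the deleted package this is what
    // LoadK8sVersionedTemplates and getTemplates return.
    data := map[string]map[string]string{
        "calico":       {">=1.16.4-rancher1": "calico-v1.17"},
        "templateKeys": {"calico-v1.17": "<calico v1.17 manifest>"},
    }
    body, ok := templateFor(data, "calico", "1.17.2-rancher1")
    fmt.Println(ok, body)
}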

View File

@ -1,514 +0,0 @@
package templates
const WeaveTemplate = `
---
# This manifest can be used to configure a self-hosted Weave Net installation.
apiVersion: v1
kind: List
items:
- apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
namespace: kube-system
- apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
spec:
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: >-
[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]
labels:
name: weave-net
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
{{if .NodeSelector}}
nodeSelector:
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
{{end}}
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: IPALLOC_RANGE
value: "{{.ClusterCIDR}}"
{{- if .WeavePassword}}
- name: WEAVE_PASSWORD
value: "{{.WeavePassword}}"
{{- end}}
{{- if .MTU }}
{{- if ne .MTU 0 }}
- name: WEAVE_MTU
value: "{{.MTU}}"
{{- end }}
{{- end }}
image: {{.Image}}
readinessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
initialDelaySeconds: 30
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: xtables-lock
mountPath: /run/xtables.lock
- name: weave-npc
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: {{.CNIImage}}
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- name: xtables-lock
mountPath: /run/xtables.lock
- name: weave-plugins
command:
- /opt/rke-tools/weave-plugins-cni.sh
image: {{.WeaveLoopbackImage}}
securityContext:
privileged: true
volumeMounts:
- name: cni-bin
mountPath: /opt
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
tolerations:
- operator: Exists
effect: NoSchedule
- operator: Exists
effect: NoExecute
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
updateStrategy:
{{if .UpdateStrategy}}
{{ toYaml .UpdateStrategy | indent 8}}
{{end}}
type: RollingUpdate
{{- if eq .RBACConfig "rbac"}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- nodes/status
verbs:
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: weave-net
labels:
name: weave-net
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
rules:
- apiGroups:
- ''
resourceNames:
- weave-net
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
roleRef:
kind: Role
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
{{- end}}
`
const WeaveTemplateV116 = `
---
# This manifest can be used to configure a self-hosted Weave Net installation.
apiVersion: v1
kind: List
items:
- apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
namespace: kube-system
- apiVersion: apps/v1
kind: DaemonSet
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
spec:
selector:
matchLabels:
name: weave-net
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: >-
[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]
labels:
name: weave-net
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
{{if .NodeSelector}}
nodeSelector:
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
{{end}}
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: IPALLOC_RANGE
value: "{{.ClusterCIDR}}"
{{- if .WeavePassword}}
- name: WEAVE_PASSWORD
value: "{{.WeavePassword}}"
{{- end}}
{{- if .MTU }}
{{- if ne .MTU 0 }}
- name: WEAVE_MTU
value: "{{.MTU}}"
{{- end }}
{{- end }}
image: {{.Image}}
readinessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
initialDelaySeconds: 30
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: xtables-lock
mountPath: /run/xtables.lock
- name: weave-npc
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: {{.CNIImage}}
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- name: xtables-lock
mountPath: /run/xtables.lock
- name: weave-plugins
command:
- /opt/rke-tools/weave-plugins-cni.sh
image: {{.WeaveLoopbackImage}}
securityContext:
privileged: true
volumeMounts:
- name: cni-bin
mountPath: /opt
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
tolerations:
- operator: Exists
effect: NoSchedule
- operator: Exists
effect: NoExecute
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
updateStrategy:
{{if .UpdateStrategy}}
{{ toYaml .UpdateStrategy | indent 8}}
{{end}}
type: RollingUpdate
{{- if eq .RBACConfig "rbac"}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- nodes/status
verbs:
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: weave-net
labels:
name: weave-net
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
rules:
- apiGroups:
- ''
resourceNames:
- weave-net
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
roleRef:
kind: Role
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
{{- end}}
`
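Both Weave templates close their updateStrategy block with a toYaml .UpdateStrategy | indent 8 pipeline, so whatever renders them has to register helpers under those two names in its text/template func map. The sketch below shows one way such helpers could look; the implementations are assumptions (only the helper names are dictated by the template), and github.com/ghodss/yaml is used here simply because it already appears in this module's vendor list.

// Hypothetical func map for rendering the updateStrategy fragment above;
// the helpers RKE actually registers are not shown in this diff.
package main

import (
    "os"
    "strings"
    "text/template"

    "github.com/ghodss/yaml"
)

const snippet = `updateStrategy:
{{if .UpdateStrategy}}
{{ toYaml .UpdateStrategy | indent 8}}
{{end}}
    type: RollingUpdate
`

func main() {
    funcMap := template.FuncMap{
        // Marshal a value (e.g. the update strategy from cluster.yml) back to YAML.
        "toYaml": func(v interface{}) string {
            b, _ := yaml.Marshal(v)
            return strings.TrimSuffix(string(b), "\n")
        },
        // Prefix every line with n spaces so the block nests under the DaemonSet spec.
        "indent": func(n int, s string) string {
            pad := strings.Repeat(" ", n)
            return pad + strings.Replace(s, "\n", "\n"+pad, -1)
        },
    }
    tmpl := template.Must(template.New("weave").Funcs(funcMap).Parse(snippet))
    data := map[string]interface{}{
        "UpdateStrategy": map[string]interface{}{
            "rollingUpdate": map[string]interface{}{"maxUnavailable": 1},
        },
    }
    if err := tmpl.Execute(os.Stdout, data); err != nil {
        panic(err)
    }
}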

54
vendor/github.com/rancher/types/kdm/kdm.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
package kdm
import (
"encoding/json"
v3 "github.com/rancher/types/apis/management.cattle.io/v3"
)
const (
Calico = "calico"
Canal = "canal"
Flannel = "flannel"
Weave = "weave"
CoreDNS = "coreDNS"
KubeDNS = "kubeDNS"
MetricsServer = "metricsServer"
NginxIngress = "nginxIngress"
TemplateKeys = "templateKeys"
)
type Data struct {
// K8sVersionServiceOptions - service options per k8s version
K8sVersionServiceOptions map[string]v3.KubernetesServicesOptions
K8sVersionRKESystemImages map[string]v3.RKESystemImages
// K8sVersionedTemplates - addon templates per K8s version ("default" is used where nothing changes for the k8s version)
K8sVersionedTemplates map[string]map[string]string
// K8sVersionInfo - min/max RKE+Rancher versions per k8s version
K8sVersionInfo map[string]v3.K8sVersionInfo
// RancherDefaultK8sVersions - default K8s version for every Rancher version
RancherDefaultK8sVersions map[string]string
// RKEDefaultK8sVersions - default K8s version for every RKE version
RKEDefaultK8sVersions map[string]string
K8sVersionDockerInfo map[string][]string
// K8sVersionWindowsServiceOptions - service options per windows k8s version
K8sVersionWindowsServiceOptions map[string]v3.KubernetesServicesOptions
CisConfigParams map[string]v3.CisConfigParams
CisBenchmarkVersionInfo map[string]v3.CisBenchmarkVersionInfo
}
func FromData(b []byte) (Data, error) {
d := &Data{}
if err := json.Unmarshal(b, d); err != nil {
return Data{}, err
}
return *d, nil
}
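A short usage sketch for the new package follows. Only FromData, the Data fields, and the kdm.Calico constant are taken from the code above; the file path is a placeholder for wherever the KDM JSON payload is loaded from (in this commit it is embedded with go-bindata).

// Hypothetical consumer of the kdm package: load a KDM payload and list the
// versioned Calico templates, mirroring the map from the deleted templates file.
package main

import (
    "fmt"
    "io/ioutil"

    "github.com/rancher/types/kdm"
)

func main() {
    b, err := ioutil.ReadFile("data.json") // placeholder for the embedded payload
    if err != nil {
        panic(err)
    }
    data, err := kdm.FromData(b)
    if err != nil {
        panic(err)
    }
    // Keyed by addon name, then by semver range, exactly like the map that
    // LoadK8sVersionedTemplates used to build.
    for rangeExpr, templateKey := range data.K8sVersionedTemplates[kdm.Calico] {
        fmt.Printf("%s -> %s\n", rangeExpr, templateKey)
    }
}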

7
vendor/modules.txt vendored
View File

@ -56,6 +56,9 @@ github.com/docker/go-connections/tlsconfig
github.com/docker/go-units
# github.com/ghodss/yaml v1.0.0
github.com/ghodss/yaml
# github.com/go-bindata/go-bindata v3.1.2+incompatible
github.com/go-bindata/go-bindata
github.com/go-bindata/go-bindata/go-bindata
# github.com/go-ini/ini v1.37.0
github.com/go-ini/ini
# github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d
@ -130,9 +133,6 @@ github.com/prometheus/common/model
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
# github.com/rancher/kontainer-driver-metadata v0.0.0-20200218183853-2f58a2f30054
github.com/rancher/kontainer-driver-metadata/rke
github.com/rancher/kontainer-driver-metadata/rke/templates
# github.com/rancher/norman v0.0.0-20200211155126-fc45a55d4dfd
github.com/rancher/norman/condition
github.com/rancher/norman/controller
@ -153,6 +153,7 @@ github.com/rancher/types/apis/management.cattle.io/v3
github.com/rancher/types/apis/project.cattle.io/v3
github.com/rancher/types/condition
github.com/rancher/types/image
github.com/rancher/types/kdm
# github.com/rancher/wrangler v0.4.2-0.20200214231136-099089b8a398
github.com/rancher/wrangler/pkg/name
github.com/rancher/wrangler/pkg/ratelimit