Compare commits

...

210 Commits
v3.3 ... v3.8

Author SHA1 Message Date
Doug Smith
efdc0a5c7d Merge pull request #724 from s1061123/downgrade-k8s-vendor
Downgrade Kubernetes vendor version
2021-09-17 14:46:50 -04:00
Tomofumi Hayashi
ae4a28b689 Downgrade Kubernetes vendor version
The Multus community decided that Multus supports the Kubernetes versions
that the Kubernetes community supports, so the vendored code is changed
to the oldest currently supported version.
2021-09-17 22:48:12 +09:00
Doug Smith
e1b0698eb2 Merge pull request #723 from s1061123/update-go-version
Update golang version (at least >= 1.16)
2021-09-15 08:13:42 -04:00
Tomofumi Hayashi
35fdb29385 Update golang version (at least >= 1.16)
The golang community currently supports 1.16 and later, hence we need
to update the golang version in our build pipeline. This change
updates the golang version to 1.17/1.16.
2021-09-14 16:53:35 +09:00
张祖建
a28f5cb56c Fix typos
Signed-off-by: 张祖建 <zhangzujian.7@gmail.com>
2021-09-14 00:25:01 +09:00
Doug Smith
ad257698ef Merge pull request #712 from nicklesimba/patch-1
Update quickstart.md
2021-08-25 16:48:33 -04:00
Nikhil Simha
8b3bbf38c8 Update quickstart.md
There is a typo in the expected resulting annotations
2021-08-25 13:27:18 -04:00
Doug Smith
a506d7606c Merge pull request #696 from cgchinmay/fix_entrypoint
Use MULTUS_MASTER_CNI_FILE_NAME as MASTER_PLUGIN as is, if specified
2021-07-16 15:25:51 -04:00
Chinmay Gadgil
7e44bb6d21 Use MULTUS_MASTER_CNI_FILE_NAME as MASTER_PLUGIN as is, if specified 2021-07-12 10:30:20 -07:00
Tomofumi Hayashi
9b45d4b211 Merge branch 'master' of https://github.com/k8snetworkplumbingwg/multus-cni 2021-07-08 22:52:30 +09:00
Doug Smith
f77c591d69 Merge pull request #695 from cgchinmay/update_golang_version
Update golang version for installing packages
2021-07-08 09:52:03 -04:00
Tomofumi Hayashi
8c9f409032 Fix github action 2021-07-08 22:52:02 +09:00
Chinmay Gadgil
0724c2b9b0 Update golang version for installing packages 2021-07-06 22:56:21 -07:00
Miguel Duarte Barroso
085870c4b5 Export poll[Interval|Timeout] as constants
Thus not repeating ourselves, nor shadowing the global variables
that had the same name (pollInterval / pollTimeout).

Signed-off-by: Miguel Duarte Barroso <mdbarroso@redhat.com>
2021-06-26 00:06:30 +09:00
Miguel Duarte Barroso
26ef3c3eb4 Cut the retry interval and timeout by half on pod retrieval
Signed-off-by: Miguel Duarte Barroso <mdbarroso@redhat.com>
2021-06-26 00:06:30 +09:00
Miguel Duarte Barroso
1eeab9b589 Retry pod retrieval on multiple error types
Also retry on the following error types (cmd ADD/DEL):
- IsInternalError
- IsConnectionReset
- IsConnectionRefused

Signed-off-by: Miguel Duarte Barroso <mdbarroso@redhat.com>
2021-06-26 00:06:30 +09:00
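As an illustration of the retry behaviour described above, here is a minimal Go sketch using the apimachinery helpers named in the commit (`apierrors.IsInternalError`, `utilnet.IsConnectionReset`, `utilnet.IsConnectionRefused`); the interval, timeout and `getPod` callback are placeholders, not Multus' actual values.

```
package podretry

import (
	"time"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apimachinery/pkg/util/wait"
)

// getPodWithRetry keeps retrying on transient API-server errors
// (internal error, connection reset, connection refused) and gives
// up on anything else or when the timeout expires.
func getPodWithRetry(getPod func() (*v1.Pod, error)) (*v1.Pod, error) {
	var pod *v1.Pod
	err := wait.PollImmediate(200*time.Millisecond, 10*time.Second, func() (bool, error) {
		var getErr error
		pod, getErr = getPod()
		if getErr == nil {
			return true, nil
		}
		if apierrors.IsInternalError(getErr) ||
			utilnet.IsConnectionReset(getErr) ||
			utilnet.IsConnectionRefused(getErr) {
			return false, nil // transient: retry
		}
		return false, getErr // permanent: stop retrying
	})
	return pod, err
}
```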
Doug Smith
7f5620db76 Merge pull request #689 from maiqueb/fix-missing-cni-version-example
examples: missing cni version on version without delegates
2021-06-23 12:57:56 -04:00
Miguel Duarte Barroso
1e29a6b50c examples: missing cni version on version without delegates
Signed-off-by: Miguel Duarte Barroso <mdbarroso@redhat.com>
2021-06-23 18:23:30 +02:00
Peng Liu
d09cc8e581 Use the default socket path in GetResourceClient when unspecified
Signed-off-by: Peng Liu <pliu@redhat.com>
2021-06-08 23:05:54 +09:00
xieyanker
cd23938191 Update calico daemonset's url 2021-06-02 21:32:08 +09:00
xieyanker
3b8aa66765 Fix typo 2021-06-02 21:31:37 +09:00
Doug Smith
37adefba51 Merge pull request #679 from xieyanker/master_cni_file
Support the specific cni file name
2021-06-02 08:08:05 -04:00
xieyanker
ec30b99534 Update how-to-use --multus-master-cni-file-name 2021-06-02 14:24:41 +08:00
xieyanker
a53233028b Update ORG_PATH for test-go.sh 2021-06-01 21:58:05 +09:00
xieyanker
492ffec8c8 Support the specific cni file name 2021-05-29 15:29:37 +08:00
Doug Smith
c25a39c5a3 Merge pull request #672 from s1061123/dev/update-lib
Update vendor package
2021-05-26 14:04:02 -04:00
Tomofumi Hayashi
3f1031e7b4 Update vendor package 2021-05-27 02:25:34 +09:00
Tomofumi Hayashi
2339c11a15 Remove global variables from kubeletclient
Fixes #673
2021-05-27 01:45:15 +09:00
Sho SHIMIZU
2ba3d3cda2 Describe additional condition when using clusterNetwork
When configuring `clusterNetwork` in the Multus CNI config, the value
for `clusterNetwork` needs to match the value of the `name` key in
the file the `clusterNetwork` setting refers to. This condition is
undocumented in doc/configuration.md. This PR adds a description of
the condition so that users can understand the `clusterNetwork` config
more clearly.

Signed-off-by: Sho SHIMIZU <sho.shimizu@gmail.com>
2021-05-24 22:11:30 +09:00
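A minimal illustration of the condition this commit documents, with made-up file contents and network name; the only point is that the `clusterNetwork` value and the `name` key in the referenced config must be identical.

```
package conf

// Illustrative only: the network name and config contents below are made up.
// The value of "clusterNetwork" in the Multus config must equal the "name"
// field inside the CNI config file it points to.

// Multus CNI config (abbreviated)
const multusConf = `{
  "type": "multus",
  "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig",
  "clusterNetwork": "default-net"
}`

// The CNI config file that "clusterNetwork" refers to must carry the
// same value in its "name" key:
const defaultNetConf = `{
  "cniVersion": "0.3.1",
  "name": "default-net",
  "type": "bridge"
}`
```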
dougbtv
6abe8ee06b The kubeconfig creation should be an atomic operation 2021-05-13 22:40:06 +09:00
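The kubeconfig is generated by the deployment entrypoint rather than Go code, but the atomic-write idea is the usual write-to-a-temp-file-then-rename pattern; a sketch in Go, with a hypothetical helper name:

```
package kubeconfig

import (
	"os"
	"path/filepath"
)

// writeFileAtomic writes data to a temporary file in the target
// directory and then renames it into place, so readers never see a
// partially written kubeconfig.
func writeFileAtomic(path string, data []byte, perm os.FileMode) error {
	tmp, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)+".tmp")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op once the rename has succeeded

	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(perm); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}
```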
Doug Smith
a03b3e8c41 Merge pull request #666 from s1061123/fix/update-nad-lib
Update net-attach-def client library
2021-05-12 12:11:14 -04:00
Tomofumi Hayashi
753d18740a Update net-attach-def client library 2021-05-11 01:29:26 +09:00
Doug Smith
1a0fa953f9 Merge pull request #661 from xieyanker/patch-1
update 30-multus.conf to 00-multus.conf
2021-04-29 09:39:45 -04:00
xieyanker
bd57d31780 update 30-multus.conf to 00-multus.conf 2021-04-28 15:50:41 +08:00
Doug Smith
91a7d22e2b Merge pull request #657 from s1061123/dev/add-cluster-net-name
Add cluster network name in pod network annotation
2021-04-20 15:01:55 -04:00
Doug Smith
8d555dba24 Merge pull request #658 from e0ne/fix-typo
Fix typo: s/Lauch/Launch
2021-04-20 15:01:00 -04:00
Ivan Kolodyazhny
e566760a45 Fix typo: s/Lauch/Launch 2021-04-20 21:18:04 +03:00
Tomofumi Hayashi
cd7492f79b Add cluster network name in pod network annotation
This addresses #656
2021-04-19 23:20:36 +09:00
Bjorn Svensson
9e3ad2ea1e Correcting Dockerfile path for manual e2e tests
Signed-off-by: Björn Svensson <bjorn.a.svensson@est.tech>
2021-04-19 17:34:30 +09:00
Doug Smith
a52680b3bf Merge pull request #651 from s1061123/dev/add-arm32
Add arm32v7 container image support
2021-04-07 11:37:00 -04:00
Tomofumi Hayashi
78cf9fced8 Add arm32v7 container image support
Currently multus-cni releases the arm32 binary as a release tarball.
This change also introduces a container image for easy deployment.
Fix #639
2021-04-04 04:23:39 +09:00
dougbtv
905b5d0e42 Github action for closing stale issues 2021-04-03 18:12:58 +09:00
dougbtv
0ef8c27a67 [docs] Adds release policy 2021-04-01 22:53:15 +09:00
Doug Smith
e603d8dbb0 Merge pull request #644 from s1061123/dev/cleanup-dev-info
Skip to call device-info related function if not required
2021-04-01 09:52:17 -04:00
Tomofumi Hayashi
9e96c38ebe Skip to call device-info related function if not required
In the case without DeviceInfo/ResourceName, we can skip the functions
related to device info. This change skips calling these functions
if the delegates do not have DeviceInfo/ResourceName.
2021-04-01 03:55:57 +09:00
Doug Smith
ad6f57f14d Merge pull request #643 from s1061123/fix/no-network-error
[WIP] Change nil instead of error in case of no network annotation
2021-03-31 12:14:56 -04:00
Tomofumi Hayashi
479e2bd78e Change nil instead of error in case of no network annotation
fix #642
2021-03-31 23:06:55 +09:00
Doug Smith
ae56437297 Merge pull request #638 from s1061123/dev/use-non-default-libcni-cache
Change libcni's cache directory from default for multus delegates
2021-03-29 17:05:05 -04:00
Doug Smith
dc4d6e6764 Merge pull request #636 from s1061123/fix/error-on-syntax-error
Return error on annotation parsing error
2021-03-29 09:23:27 -04:00
Tomofumi Hayashi
75acc10312 Change libcni's cache directory from default for multus delegates
This changes the CNI cache directory to a non-default location to avoid
conflicts among CNI runtimes.
2021-03-27 04:12:10 +09:00
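One plausible way to do this with libcni (not necessarily the exact call Multus uses) is to build the delegate invoker with an explicit, Multus-specific cache directory; the path below is illustrative:

```
package delegate

import (
	"github.com/containernetworking/cni/libcni"
)

// newCNIConfig builds a libcni CNIConfig that stores its result cache
// under a Multus-specific directory instead of libcni's default, so it
// cannot collide with the cache kept by the container runtime's own
// CNI invocations. The cache path is illustrative only.
func newCNIConfig(binDirs []string) *libcni.CNIConfig {
	return libcni.NewCNIConfigWithCacheDir(binDirs, "/var/lib/cni/multus", nil)
}
```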
Tomofumi Hayashi
8d7308e6bb Return error on annotation parsing error
Fix #635
2021-03-26 01:31:01 +09:00
Doug Smith
c4b9534529 Merge pull request #634 from s1061123/fix/delete_old_main
Delete main from pkg/multus because now it was moved to cmd/main.go
2021-03-24 17:02:45 -04:00
Tomofumi Hayashi
c8e63996c8 Delete main from pkg/multus because now it was moved to cmd/main.go 2021-03-25 03:02:50 +09:00
dougbtv
5d98a0e0eb Updates Readinessindicator error message to be actionable by users 2021-03-24 04:19:44 +09:00
Doug Smith
343a4e3687 Merge pull request #629 from s1061123/fix/change-org-name
Fix/change org name
2021-03-17 15:36:17 -04:00
Doug Smith
feb52328d2 Merge pull request #627 from s1061123/fix/change-image-url
Change image url to ghcr.io
2021-03-16 14:50:00 -04:00
Tomofumi Hayashi
118cc629cf Update organization name due to ownership change 2021-03-16 16:24:49 +09:00
Tomofumi Hayashi
784fecfa02 Change image url to ghcr.io 2021-03-13 05:05:17 +09:00
Doug Smith
f6298a3a29 Merge pull request #626 from s1061123/dev/use-ghcr
Change github action to use ghcr.io
2021-03-12 14:51:45 -05:00
Tomofumi Hayashi
20049dedfe Change github action to use ghcr.io 2021-03-13 04:38:21 +09:00
Doug Smith
20a5d5f34a Merge pull request #625 from e0ne/example-pod
Fix link to an example pod with SRIOV network
2021-03-12 14:33:59 -05:00
Ivan Kolodyazhny
b97c443a56 Fix link to an example pod with SRIOV network 2021-03-11 16:48:04 +02:00
Doug Smith
40a0faed5e Merge pull request #624 from s1061123/dev/bump-libcni
Bump libcni version
2021-03-10 15:53:14 -05:00
Tomofumi Hayashi
5feb1343cd Bump libcni version 2021-03-11 04:50:21 +09:00
Yun Zhou
dc9315f125 Provide the option to not to log via STDERR
Today, Multus will always log via STDERR, and these logs will then be
logged by the Kubelet. If we also choose to have Multus log to a file by
setting the LogFile option in the CNI configuration, the same logs will
be logged twice.

This commit provides the option to disable logging to STDERR.

Signed-off-by: Yun Zhou <yunz@nvidia.com>
2021-03-10 17:13:12 +09:00
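Assuming the option is exposed as a boolean `logToStderr` key alongside `logFile`/`logLevel` in the Multus CNI configuration (check the configuration docs for the exact names), an abbreviated config that logs only to a file might look like this:

```
package conf

// Abbreviated Multus CNI configuration: write logs to a file and
// suppress the duplicate copy normally sent to STDERR (which the
// kubelet would otherwise log a second time). Key name assumed.
const multusConfWithFileLogging = `{
  "cniVersion": "0.3.1",
  "name": "multus-cni-network",
  "type": "multus",
  "logFile": "/var/log/multus.log",
  "logLevel": "debug",
  "logToStderr": false,
  "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
}`
```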
Kennelly, Martin
5141eab28a Fix invite to NPWG slack
Re-adding the NPWG Slack invite mechanism.

Signed-off-by: Kennelly, Martin <martin.kennelly@intel.com>
2021-03-02 04:39:51 +09:00
Doug Smith
c1166d2d3d Merge pull request #617 from s1061123/fix/check-tty
Conditional check for tty/non-tty in case of wait (i.e. read)
2021-02-25 10:22:17 -05:00
Tomofumi Hayashi
de463cca0d Conditional check for tty/non-tty in case of wait (i.e. read)
Fix #615.
2021-02-26 00:18:06 +09:00
Tomofumi Hayashi
02ad030899 Fix CI info 2021-02-24 15:32:46 +09:00
Doug Smith
e80696dc8e Merge pull request #613 from s1061123/dev/update-gh-actions
Update github actions CI pipeline including lint error fix
2021-02-23 10:05:55 -05:00
Tomofumi Hayashi
9bfb9b9b67 Update github actions CI pipeline including lint error fix 2021-02-23 20:01:34 +09:00
Doug Smith
a0d292a0e8 Merge pull request #606 from dougbtv/bump-protobuf
Bump protobuf to v1.3.2
2021-02-18 14:56:41 -05:00
dougbtv
fe42962eb5 [deps][vendor] Updates gogo/protobuf 2021-02-18 11:29:20 -05:00
Kennelly, Martin
094dcbe2c2 Update Slack URL in README following renaming
Signed-off-by: Kennelly, Martin <martin.kennelly@intel.com>
2021-02-18 23:42:26 +09:00
dougbtv
8bbd3fdcf2 Change to sleep infinity & read and omit & wait on sleep n (e.g. not infinity) 2021-02-18 23:42:00 +09:00
Doug Smith
e728da78bc Merge pull request #611 from yoheiueda/s390x
Add s390x support
2021-02-18 09:39:40 -05:00
Yohei Ueda
9824963f79 Add s390x support
This PR updates the GitHub Actions workflows to build multus
container images for the IBM Z architecture (s390x), and also
adds daemonsets for s390x nodes.

entrypoint.sh uses python3 when it is available, since centos images
are not available for s390x, and python2 is not installed by default
on debian-based images.

Signed-off-by: Yohei Ueda <yohei@jp.ibm.com>
2021-02-18 01:40:33 -05:00
Doug Smith
cd9efbf703 Merge pull request #607 from amorenoz/bug/runtime
conf: do not modify global runtimeconfig when merging
2021-02-16 14:23:34 -05:00
Adrian Moreno
91e4efcd68 conf: make a copy of global RuntimeConfig on merge
When we call mergeRuntimeConfig, the global RuntimeConfig gets
overwritten with the result of the merge, thus affecting the
subsequent delegates.

Do not modify the global RuntimeConfig; instead, make a copy
when merging it.

Also, if a value has been provided for CNIDeviceInfoFile in the
delegate's runtimeconfig, overwrite it to avoid possible name
collisions.

Signed-off-by: Adrian Moreno <amorenoz@redhat.com>
2021-02-11 20:39:41 +01:00
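A generic sketch of the copy-before-merge pattern this commit describes; the struct and field names below are illustrative stand-ins, not Multus' actual types:

```
package conf

// RuntimeConfig stands in for the global runtime configuration; the
// fields are placeholders for whatever per-delegate data gets merged in.
type RuntimeConfig struct {
	PortMaps          []string
	CNIDeviceInfoFile string
}

// mergeRuntimeConfig merges delegate-specific values into a *copy* of
// the global RuntimeConfig, so the global value is left untouched for
// the next delegate.
func mergeRuntimeConfig(global RuntimeConfig, delegate RuntimeConfig) *RuntimeConfig {
	merged := global // value copy: mutations below do not touch the caller's struct
	if len(delegate.PortMaps) > 0 {
		merged.PortMaps = delegate.PortMaps
	}
	if delegate.CNIDeviceInfoFile != "" {
		merged.CNIDeviceInfoFile = delegate.CNIDeviceInfoFile
	}
	return &merged
}
```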
dougbtv
63734fc026 Adds a trap for SIGTERM in entrypoint script 2021-02-10 04:57:07 +09:00
Doug Smith
bd90c262f6 Merge pull request #603 from s1061123/fix/graceperiod
Add grace period to missing deployment template
2021-01-29 08:40:48 -05:00
Tomofumi Hayashi
d093709f94 Add grace period to missing deployment template 2021-01-28 15:25:45 +09:00
dougbtv
f6df613c32 Pods in daemonset should have grace period reduced.
Otherwise, it can take a long time for the daemonset to stop, and there's no real reason it needs to hang out for a long time.
2021-01-28 15:22:27 +09:00
dougbtv
d4e8699825 The readme links need to be updated after directory change.
This updates the links to point to "doc/" instead of "docs/"
2020-12-17 05:23:14 +09:00
Nikhil Simha
62abb002dd Restructured multus to use Go standard project layout. 2020-12-16 22:38:10 +09:00
Doug Smith
bd9f2e9700 Merge pull request #595 from Mmduh-483/capabilities-entry-fail
Fix entrypoint fail to read capabilities in non chain plugin config
2020-12-14 08:28:41 -05:00
Mamduh Alassi
09190bce3e Fix entrypoint fail to read capabilities in non chain plugin config
The entrypoint script fails with an error when trying to read capabilities in
a non-chained plugin config file when using "--multus-conf-file=auto"

Signed-off-by: Mamduh Alassi <mamduhala@mellanox.com>
2020-12-14 12:58:35 +02:00
Doug Smith
24386abae3 Merge pull request #553 from venuiyer/vi-multus
Allow the default-route to be empty
2020-12-10 09:45:53 -05:00
dougbtv
476c738c13 Allows specifying "global namespaces" when using namespace isolation 2020-12-10 23:40:18 +09:00
Doug Smith
e924318e18 Merge pull request #577 from Billy99/billy99-device-info
Add DeviceInfo Data to NetworkStatus Annotation
2020-12-01 13:50:26 -05:00
dougbtv
cfa0d64925 Multus should exit zero on DEL when cache file is missing and pod cannot be found.
Otherwise, a pod may not be fully deleted; its sandbox may remain as the DEL continues to be retried because Multus exits non-zero every time.

See: https://bugzilla.redhat.com/show_bug.cgi?id=1900835
2020-11-25 22:11:17 +09:00
Billy McFall
ae797801b1 Remove Multus version of NetworkStatus
NetworkStatus was moved to network-attachment-definition-client in previous
merges, and the Multus version of the structure was left behind and is not
being used.

Signed-off-by: Billy McFall <22157057+Billy99@users.noreply.github.com>
2020-11-19 16:38:42 -05:00
Billy McFall
f826461b06 Add DeviceInfo Data to Network Status Annotation
Signed-off-by: Billy McFall <22157057+Billy99@users.noreply.github.com>
2020-11-19 16:38:42 -05:00
Billy McFall
fb557ab20f Add CNIDeviceFile in RuntimeConfig
DPDeviceFile is used by Device Plugins to pass device data to CNIs, as defined
in the device-info-spec. The name of the DPDeviceFile is defined by the
device-info-spec as:
 <ResourceName>-<DeviceID>-device.json

If the DPDeviceFile exists, the NPWG implementation makes a copy of the file
and passes the name of the file to the delegate CNI via capabilityArgs as
CNIDeviceFile. If the DPDeviceFile does not exist, the filename is still
passed to the CNI. The CNI can create the file and popluate it if a device
is created within the CNI.

The name of the CNIDeviceFile is not defined by device-info-spec, but to
ensure the name does not clash it is formed by the following unique triplet:
[networkName, PodUUID, ifName]

k8snetworkplumbingwg/network-attachment-definition-client repo has utility
functions to abstract some of this functionality so it can be reused across
Device Plugins, NPWG implementations and CNIs.

Signed-off-by: Billy McFall <22157057+Billy99@users.noreply.github.com>
2020-11-19 16:38:30 -05:00
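A rough sketch of the naming described above. The device-plugin file name follows the `<ResourceName>-<DeviceID>-device.json` convention quoted in the message; the CNI-side name is only said to be derived from the [networkName, PodUUID, ifName] triplet, so the exact encoding below is a guess, and any sanitization of slashes in resource names is omitted:

```
package deviceinfo

import "fmt"

// dpDeviceInfoFile builds the file name the device plugin uses, per the
// device-info-spec convention quoted in the commit message above.
func dpDeviceInfoFile(resourceName, deviceID string) string {
	return fmt.Sprintf("%s-%s-device.json", resourceName, deviceID)
}

// cniDeviceInfoFile builds a name that is unique per attachment from the
// (networkName, podUID, ifName) triplet. The separator and ordering here
// are illustrative only; the spec leaves the exact format to the implementation.
func cniDeviceInfoFile(networkName, podUID, ifName string) string {
	return fmt.Sprintf("%s-%s-%s-device.json", networkName, podUID, ifName)
}
```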
Billy McFall
fd3f485e63 Add deviceID support in runtimeConfig
Signed-off-by: Billy McFall <22157057+Billy99@users.noreply.github.com>
2020-11-19 16:34:29 -05:00
Billy McFall
665c43a2cd Merge RuntimeConf on delPlugins
RuntimeConfig depends on the delegate configuration. The Netconf runtime
information should be merged with the delegate config and the result
added to each command that is sent to the delegates. That was being done
for all commands except for delPlugins.

Do not export MergeCNIRuntimeConfig and call it from
CreateCNIRuntimeConf that now accepts a delegate ptr.

Call it from delPlugins for each delegate

Signed-off-by: Billy McFall <22157057+Billy99@users.noreply.github.com>
2020-11-19 16:34:15 -05:00
Billy McFall
2cd91b6bad Update vendor for network-attachment-definition-client update
Signed-off-by: Billy McFall <22157057+Billy99@users.noreply.github.com>
2020-11-19 16:06:54 -05:00
Billy McFall
e064679967 Point to latest network-attachment-definition-client
Move Multus to use latest network-attachment-definition-client repo which
contains the device-info-spec changes.

Signed-off-by: Billy McFall <22157057+Billy99@users.noreply.github.com>
2020-11-19 16:06:54 -05:00
Doug Smith
6a23d65fea Merge pull request #583 from aojea/cmdadddel
clarify cmdAdd error
2020-11-19 10:45:04 -05:00
Antonio Ojea
1b58bed5a6 clarify cmdAdd error
it can happen that a CNI Add operation is canceled because the
runtime sends a CNI Del too fast, so the pod was already deleted
when multus tries to set the network status annotations.

Since multus is an executable and cannot persist state, it can't
track the CNI requests that are done in parallel, but it can
assume that, if it was able to get the pod from the API, and in a
subsequent request the pod no longer exists, it is because it was
deleted.

Signed-off-by: Antonio Ojea <aojea@redhat.com>
2020-11-19 10:30:12 +01:00
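A hedged sketch of the resulting behaviour: if the pod could be fetched earlier in cmdAdd but the API reports it as gone when the network status annotation is written, treat the ADD as a no-op instead of failing. The helper name and wiring are illustrative:

```
package multus

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// tolerateDeletedPod wraps the call that writes the network status
// annotation at the end of cmdAdd (setNetworkStatus is a stand-in).
func tolerateDeletedPod(setNetworkStatus func() error) error {
	if err := setNetworkStatus(); err != nil {
		// The pod was retrieved successfully earlier in cmdAdd; if it is
		// gone now, the runtime has already issued a DEL, so treat this
		// as a no-op rather than failing the ADD.
		if apierrors.IsNotFound(err) {
			return nil
		}
		return fmt.Errorf("error setting network status: %v", err)
	}
	return nil
}
```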
Doug Smith
957e26674a Merge pull request #573 from rhdojun/patch-1
Update quickstart.md
2020-11-12 09:35:38 -05:00
Doug Smith
36d3874c4c Merge pull request #571 from Billy99/billy99-k8s-rebase
Bump kubernetes dependencies to v0.18.3
2020-10-27 15:36:55 -04:00
rhdojun
4bfc2b14f2 Update quickstart.md
Felt like the line wasn't finished yet.
2020-10-27 13:12:34 +01:00
Billy McFall
f62529d4ff Update source for k8s bump to v0.18.3
Signed-off-by: Billy McFall <22157057+Billy99@users.noreply.github.com>
2020-10-23 15:49:49 -04:00
Billy McFall
36b5edff29 Update vendor, go.mod and go.sum for k8s bump to v0.18.3
Signed-off-by: Billy McFall <22157057+Billy99@users.noreply.github.com>
2020-10-23 10:55:54 -04:00
Billy McFall
c8739f64b9 Run 'go fmt' on code base.
Signed-off-by: Billy McFall <22157057+Billy99@users.noreply.github.com>
2020-10-23 10:43:23 -04:00
Doug Smith
bc95a19c5d Merge pull request #559 from s1061123/fix/remove-travis
Remove travis.yml because CI is now switched to github/actions
2020-09-15 20:06:16 -04:00
Tomofumi Hayashi
0e4b8be1e7 Remove travis.yml because CI is now switched to github/actions 2020-09-15 16:47:14 +09:00
Doug Smith
d13e06a5df Merge pull request #556 from s1061123/add-default-route-e2e
Add e2e tests for default-route annotation
2020-09-14 13:31:45 -04:00
Tomofumi Hayashi
ff3ee290c9 Add e2e tests for default-route annotation 2020-09-14 12:56:12 +09:00
Shane Utt
8cc60f7dd8 Add ARM64 to images
This PR takes the existing ARM64v8 build
and creates Daemonsets in the install images
to deploy it for ARM based K8s clusters.

Fixes #554

Signed-off-by: Shane Utt <shaneutt@linux.com>
2020-09-14 12:47:34 +09:00
venu iyer
292d9a9af3 Allow the default-route to be empty
In
https://docs.google.com/document/d/1Ny03h6IDVy_e_vmElOqR7UdTPAG_RNydhVE1Kx54kFQ,
section 4.1.2.1.9,

"
4.1.2.1.9 “default-route” Default route selection for a particular attachment

This optional key with value of type string-array is used to explicitly select
which attachment will receive the default route. The value of items in the
“default-route” array are intended to be gateways, e.g. an IP address to which
packets that do not match any other routes are sent. This key must only be set
on one item in the Network Attachment Selection Annotation. This list may be empty.
"

However, an empty list will currently fail; this change accommodates an
empty "default-route" by retaining the default route added by the
delegate.

Signed-off-by: venugopal iyer <venugopali@nvidia.com>
2020-09-08 14:53:25 -07:00
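For reference, a pod annotation value that exercises this case might look like the following (network name made up); the empty `default-route` list now simply keeps the default route installed by the delegate itself.

```
package annotation

// Example value for the k8s.v1.cni.cncf.io/networks pod annotation: the
// attachment requests "macvlan-net" with an empty "default-route" list,
// which leaves the delegate's own default route in place.
const networksAnnotation = `[
  {
    "name": "macvlan-net",
    "default-route": []
  }
]`
```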
Doug Smith
2a01aec396 Merge pull request #552 from intel/fix/e2e
Update e2e
2020-09-03 13:50:14 -04:00
Tomofumi Hayashi
17b24d5fd5 Update e2e
This change contains the following:
 - update kind version to 0.8.1
 - update the setup script for the new kind version
 - add a teardown script
 - update the README
2020-09-04 02:20:35 +09:00
Doug Smith
2dab3225de Merge pull request #550 from s1061123/fix/image-url
Specify image repo URL explicitly
2020-08-31 08:36:41 -04:00
Tomofumi Hayashi
95cc1902c0 Specify image repo URL explicitly
This fix supplies the domain for the container image repo because,
in some runtime configurations, 'docker.io' is not specified as
the default container repository URL.
2020-08-31 14:58:10 +09:00
Mamduh Alassi
36f2fd64e0 Use latest image version 3.6 in daemonset files 2020-08-20 22:35:19 +09:00
Doug Smith
be492e1778 Merge pull request #545 from clementnuss/master
Pull underlying capabilities up to 00-multus.conf
2020-08-06 09:37:02 -04:00
Clément Nussbaumer
4c271a57d5 Pull underlying capabilities up to 00-multus.conf
Using Python, we analyze the content of the master plugin configuration
and integrate any capabilities from the underlying chained plugin
declarations. We only pull enabled capabilities from the underlying
(chained) plugin definitions.

Fixes intel#544

Signed-off-by: <Clement.Nussbaumer@Swisscom.com>
2020-08-05 10:29:05 +02:00
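The actual change lives in the Python snippet inside entrypoint.sh; the same idea rendered as a Go sketch over a parsed conflist (generic maps, hypothetical function name):

```
package entrypoint

// mergeEnabledCapabilities collects every capability that any chained
// plugin enables (value true) so it can be copied into the generated
// 00-multus.conf. conflist is the parsed master plugin configuration.
func mergeEnabledCapabilities(conflist map[string]interface{}) map[string]bool {
	merged := map[string]bool{}
	plugins, _ := conflist["plugins"].([]interface{})
	for _, p := range plugins {
		plugin, _ := p.(map[string]interface{})
		caps, _ := plugin["capabilities"].(map[string]interface{})
		for name, enabled := range caps {
			if on, _ := enabled.(bool); on {
				merged[name] = true
			}
		}
	}
	return merged
}
```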
Doug Smith
e15f97860f Merge pull request #541 from s1061123/fix/crd
Fix CRD yaml file for right explanation information
2020-07-28 10:39:13 -04:00
Tomofumi Hayashi
78669323d2 Fix CRD yaml file for right explanation information 2020-07-28 11:56:08 +09:00
Doug Smith
c85b79f5ff Merge pull request #536 from s1061123/fix/update-vendor
Update vendors
2020-07-21 20:17:50 -04:00
Tomofumi Hayashi
801e1e8940 Update vendors 2020-07-22 09:15:24 +09:00
Doug Smith
41fefbb001 Merge pull request #522 from s1061123/fix/del-error-fix
Fix/del error fix
2020-07-21 17:13:33 -04:00
Tomofumi Hayashi
4eb6ae1553 Fix error handling on cmdDel
Fix #519
2020-07-21 14:52:07 +09:00
Doug Smith
b5211cb0d2 Merge pull request #534 from bmwiedemann/date
Allow to override build date with SOURCE_DATE_EPOCH
2020-07-09 09:52:28 -04:00
Bernhard M. Wiedemann
29df24fa6c Allow to override build date with SOURCE_DATE_EPOCH
in order to make builds reproducible.
See https://reproducible-builds.org/ for why this is good
and https://reproducible-builds.org/specs/source-date-epoch/
for the definition of this variable.

This date call only works with GNU date.

Also use UTC to be independent of timezone.
2020-07-08 05:08:11 +02:00
Doug Smith
e4a8c9f639 Merge pull request #530 from rktidwell/master
build: Enable -mod build flag to be toggled via environment variable
2020-07-06 08:49:58 -04:00
Adrian Chiris
2796202d8c Add infinibandGUID runtime config to delegate netconf
This commit extends the various network conf methods
to translate the `infiniband-guid` network attachment configuration
to the `infinibandGUID` CNI runtime config.

- Update relevant methods in types package
- Update unit tests
2020-06-26 14:55:21 +09:00
Adrian Chiris
46be700cb5 Struct updates
- Add `infinibandGUID` CNI runtime config
- Add `infiniband-guid` Network attachment attribute
- Reflect infiniband guid request in Delegate network config
2020-06-26 14:55:21 +09:00
Ryan Tidwell
a167d54608 build: Enable -mod build flag to be toggled via environment variable
This change will enable builds to be performed with the option of
passing the -mod flag to 'go build'. This can be done by optionally
setting the MODMODE environment variable prior to building.

For example, toggling the -mod setting for the build can be
accomplished as follows:

```
$ MODMODE=vendor ./build
```

Signed-off-by: Ryan Tidwell <rtidwell@suse.com>
2020-06-24 16:11:19 -05:00
Shahar Klein
ecb79330c0 Add support for log rotation
When using debug level, the log files tend to fill up quickly.
Add support for log rotation using lumberjack, a simple and easy-to-use
log rotation module.

Signed-off-by: Shahar Klein <sklein@nvidia.com>
2020-06-17 15:31:19 +09:00
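Multus wires lumberjack into its own logging package; the snippet below only shows the general lumberjack pattern, with made-up size and retention values:

```
package logging

import (
	"log"

	"gopkg.in/natefinch/lumberjack.v2"
)

// useRotatingLogFile sends the standard logger's output through
// lumberjack, which rotates the file by size and prunes old backups.
func useRotatingLogFile(path string) {
	log.SetOutput(&lumberjack.Logger{
		Filename:   path,
		MaxSize:    100, // megabytes before rotation
		MaxBackups: 5,   // rotated files to keep
		MaxAge:     28,  // days to keep rotated files
	})
}
```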
Doug Smith
3882e405ef Merge pull request #515 from s1061123/fix/namespace-in-status
Fix network status name/namespace to compliant with multi-net-spec
2020-06-09 15:08:23 -04:00
Doug Smith
5e15ff18f8 Merge pull request #518 from dougbtv/readme-typo
README typo for roll-YOUR-own
2020-06-09 15:07:49 -04:00
dougbtv
28bb98f78b README typo for roll-YOUR-own 2020-06-09 15:07:19 -04:00
Tomofumi Hayashi
e7c8977423 Fix network status name/namespace to compliant with multi-net-spec
This fix changes the network status name to the net-attach-def name with
namespace, to comply with the k8s NPWG multi-net-spec.
2020-06-09 12:43:49 +09:00
Doug Smith
fff8e490b9 Merge pull request #511 from s1061123/fix/error-on-fail
Change the error handling for kubernetes client
2020-06-08 09:14:02 -04:00
dougbtv
809a16fbed Adds code of conduct 2020-06-05 09:18:10 +09:00
Tomofumi Hayashi
f5dcb9d8be Change the error handling for kubernetes client
This change adds retry to wait a bit if kubernetes client
returns service unavailable.

This fixes #492
2020-06-01 23:17:42 +09:00
Doug Smith
fdd7d30095 Merge pull request #507 from s1061123/dev/update-example
Simplify examples directory
2020-05-28 09:59:10 -04:00
Tomofumi Hayashi
c976f6bceb Add deviceid in clusterNetwork 2020-05-28 10:00:29 +09:00
Tomofumi Hayashi
8131923b14 Simplify examples directory 2020-05-19 21:29:19 +09:00
Tomofumi Hayashi
4eac660359 Introduce gopkg.in for go module
This change introduces gopkg.in to allow everyone to use the multus
code as a library. Currently multus-cni uses version 'v3', hence
it hits a go modules limitation, as #504 describes.

This change fixes #504 with gopkg.in.
2020-05-15 21:33:26 +09:00
dougbtv
ec5fd6c923 Move pre-1.16 Kubernetes assets to a deprecated folder (to later remove) 2020-05-08 09:50:33 +09:00
Doug Smith
b8540e190d Merge pull request #455 from cucosion/feature/gkeexample
Daemonset for Google Kubernetes Engine for kubernetes cluster version…
2020-04-30 09:44:45 -04:00
Doug Smith
a4c2b67784 Merge pull request #497 from s1061123/fix/improve-cni-logs
[WIP] Fix/improve cni logs
2020-04-30 08:43:09 -04:00
Tomofumi Hayashi
b2439f8279 Simplify error message in case of delegating CNI error
This fix removes redundant error messages and try to simplify
if the error comes from delegating CNI plugins.
2020-04-30 18:02:05 +09:00
dougbtv
c8fc357f05 Adds development docs note regarding issue policy 2020-04-30 10:59:26 +09:00
dougbtv
92beb4bcdc Sets the Kubernetes API calls timeout to 60 seconds 2020-04-23 10:47:11 +09:00
dougbtv
931c12531d Allows namespaceIsolation to allow pods in any namespace refer to the default namespace 2020-04-22 09:08:11 +09:00
Doug Smith
8dba1f4a37 Merge pull request #487 from redhat-nfvpe/dev/test
Skip docker push action if REPOSITORY_PASS is not set
2020-04-21 08:56:08 -04:00
Tomofumi Hayashi
aeaf78a310 Skip docker push action if REPOSITORY_PASS is not set 2020-04-21 21:54:44 +09:00
Tomofumi Hayashi
ab073f88cd Add error message in case of unexpected situation
To troubleshoot #481, this adds an error message for further analysis.
2020-04-21 21:42:57 +09:00
Ted Yu
1df1e4f530 Check Pod parameter against nil before calling Eventf
Signed-off-by: Ted Yu <yuzhihong@gmail.com>
2020-04-21 00:12:36 +09:00
dougbtv
81297f5b74 Updates Dockerfile to golang 1.13 (specifying version) 2020-04-20 21:44:17 +09:00
dougbtv
3ddc6250de Fix pre 1.16 api version for CRDs 2020-04-17 17:10:20 +09:00
dougbtv
8fff5aac60 Add description fields to NetworkAttachmentDefinition CRD 2020-04-16 02:01:40 +09:00
Doug Smith
6ccf03e8b3 Merge pull request #473 from ashish-billore/master
Error log for network-attachment-definition
2020-04-02 09:46:00 -04:00
dougbtv
c0891354ff Adds readinessindicatorfile check on CNI DEL 2020-04-02 22:40:09 +09:00
ashish-billore
ea95133ef4 Error log for network-attachment-definition
Corrected the error for a missing network-attachment-definition and
updated the test for this error.
2020-04-02 17:40:32 +09:00
Doug Smith
5931d24639 Merge pull request #469 from ashish-billore/patch-2
Update quickstart.md
2020-03-30 08:40:33 -04:00
Ashish Billore
055f74fc61 Update quickstart.md 2020-03-30 14:02:35 +09:00
Mamduh Alassi
14fb6be109 Use latest image version 3.4.1 in daemonset file 2020-03-27 00:21:21 +09:00
ICucos
b6bfb75af0 Daemonset for Google Kubernetes Engine for kubernetes cluster versions pre 1.16. To be able to start a cluster with version 1.15 I used "gcloud container clusters create cl-name --zone europe-west1-b --num-nodes=2 --machine-type=n1-standard-8 --image-type ubuntu --enable-network-policy --cluster-version=1.15.9-gke.24". It is important to start the cluster with --enable-network-policy, which enables CNI in the kubelet. 2020-03-25 13:03:44 +02:00
Tomofumi Hayashi
7f50f5f175 Fix CI workflow 2020-03-23 23:49:05 +09:00
Doug Smith
ea72e56fab Merge pull request #462 from s1061123/dev/add-arm64v8-ci
Add arm64 build in CI workflow
2020-03-23 09:52:25 -04:00
Tomofumi Hayashi
055a7568ad Logging improvement (UID, net-attach-def)
This change adds pod UID and net-attach-def name in verbose log
and sends kubernetes event when net-attach-def is not found.
2020-03-23 21:32:33 +09:00
Tomofumi Hayashi
c7cd0ef822 Add arm64 build in CI workflow 2020-03-23 13:00:50 +09:00
Trevor Tao
f885b38332 Add Dockerfile for building image on arm64
Add Dockerfile.arm64 for building Multus image on arm64.

Actually it's multi-arch capable and works for
amd64 platform too if used.

Signed-off-by: Trevor Tao <trevor.tao@arm.com>
2020-03-19 22:41:27 +09:00
Trevor Tao
cd1a76f919 Add Dockerfile for building image on arm64
Add Dockerfile.arm64 for building Multus image on arm64.

Actually it's multi-arch capable and works for
amd64 platform too if used.

Signed-off-by: Trevor Tao <trevor.tao@arm.com>
2020-03-19 22:41:27 +09:00
Tomofumi Hayashi
079c853eba Add Kubernetes event log when the pod is launched 2020-03-16 22:08:03 +09:00
Tomofumi Hayashi
bfaf22964b Fix goreleaser file for build-release error 2020-03-16 21:58:41 +09:00
ICucos
1bc8064a5b Daemonset for Google Kubernetes Engine for kubernetes cluster version 1.16+. To be able to start a cluster with version 1.16 I used the rapid channel: "gcloud beta container clusters create cluster --zone europe-west1-b --num-nodes=1 --machine-type=n1-standard-8 --image-type ubuntu --enable-network-policy --release-channel=rapid --enable-autorepair" 2020-03-16 13:19:36 +02:00
Tomofumi Hayashi
eaf6ff6e20 Update vendor file to fix #426 2020-03-13 12:16:49 +09:00
Adrian Chiris
5577822b36 Pass DeviceID to each plugin in configuration list
Until today, it was hardcoded that DeviceID will only be
injected for the first CNI in the chain.

This commit modifies multus to pass DeviceID to each network
configuration element in a network configuration list.
This will allow multiple CNIs to act on the DeviceID when CNI
plugins are being chained for a specific network.

The change is required to allow CNIs to ensure network
isolation (introduced in kernel >= 5.2.0, see [1]) for RDMA devices
when they exist.

e.g. for an SR-IOV network:
sriov-cni moves the network device associated with the provided DeviceID
to the container's network namespace.
An "RDMA cni" would do the same for the corresponding RDMA device when
RDMA traffic is desired on the network.

[1] https://patchwork.kernel.org/cover/10810451/
2020-03-12 23:48:03 +09:00
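A sketch of the idea: set `deviceID` on every element of the `plugins` array rather than only the first. The function name and JSON handling are illustrative, not Multus' actual code:

```
package conf

import "encoding/json"

// injectDeviceID adds the deviceID to every element of a CNI
// configuration list (not just the first one), so each chained plugin
// can act on the device. rawList is the JSON of the conflist.
func injectDeviceID(rawList []byte, deviceID string) ([]byte, error) {
	var conflist map[string]interface{}
	if err := json.Unmarshal(rawList, &conflist); err != nil {
		return nil, err
	}
	plugins, _ := conflist["plugins"].([]interface{})
	for _, p := range plugins {
		if plugin, ok := p.(map[string]interface{}); ok {
			plugin["deviceID"] = deviceID
		}
	}
	return json.Marshal(conflist)
}
```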
Periyasamy Palanisamy
32fe803221 Log message after logger initialization
In cmdCheck and cmdDelete, debug messages are logged even before the logger
is initialized with its log level and file (done in types.LoadNetConf).
Because of this, those debug messages are not logged into the file.

Signed-off-by: Periyasamy Palanisamy <periyasamy.palanisamy@est.tech>
2020-03-05 23:48:15 +09:00
Doug Smith
1412caafbd Merge pull request #446 from s1061123/no-config-invalidation
Removes configuration invalidation
2020-03-02 10:08:45 -05:00
Tomofumi Hayashi
bdece11192 Removes configuration invalidation 2020-02-28 12:43:49 +09:00
Doug Smith
32952b929c Merge pull request #442 from s1061123/dev/kind-e2e
[WIP]Add e2e test with kind in CI pipeline
2020-02-20 09:35:25 -05:00
dougbtv
1042a5e25f Changes wait.ExponentialBackoff to wait.PollImmediate and exposes readinessindicatorfile via entrypoint parameter 2020-02-19 10:03:43 +09:00
dougbtv
9774ddade1 [docs] Updates quickstart to use alpine image 2020-02-13 17:09:37 +09:00
Tomofumi Hayashi
76c458228e Add e2e test with kind in CI pipeline 2020-02-13 17:02:22 +09:00
dougbtv
4fdaf3f427 [bugfix] Fixes unnecessary wait when readinessindicatorfile is not present 2020-02-13 01:04:28 +09:00
Tomofumi Hayashi
8334b73296 Add Github Actions for CI pipeline, to replace travis 2020-02-12 22:57:04 +09:00
groclawski
3e601c6ffe double word fix 2020-02-11 00:04:04 +09:00
Tomofumi Hayashi
9c35b5ccf2 Fix travis CI 2020-02-10 16:01:18 +09:00
Tomofumi Hayashi
da9da81be9 Fix travis ci error 2020-01-25 01:18:32 +09:00
Doug Smith
36f630fd39 Merge pull request #431 from s1061123/dev/container-march-images
Support container multi architecture image with manifest
2020-01-23 09:40:07 -05:00
Tomofumi Hayashi
dfd19e5b10 Support container multi architecture image with manifest 2020-01-21 02:48:10 +09:00
Doug Smith
c534b7d364 Uses atomic operation for writing config file (#430) 2020-01-17 16:10:46 -05:00
Michael C. Cambria
c35c4b7e97 Add CNI Check support (#427)
Add tests for CNI version 0.4.0
2020-01-13 09:57:21 -05:00
Tomofumi Hayashi
b2bf154328 Fix travis ci (#421)
This change updates the travis pipeline as follows:
 - clean up unused parameters/stages
 - add a condition based on a parameter (now users can use travis CI
   in a cloned repo)
 - suppress ppc64 deployment because amd64 also builds ppc images
   and other arches (fixes #418)
2019-12-11 15:41:36 -05:00
Tomofumi Hayashi
9874c14e23 Replace multus code with NPWG client library code 2019-12-10 14:38:09 +09:00
Tomofumi Hayashi
f4f2f65d1d Use NPWG client library to manipulate net-attach-defs
This change introduces NPWG client library functions to manipulate
net-attach-defs. This also requires changes to the k8sclient and unit
test code as well.
2019-12-10 14:38:09 +09:00
Shravan Vallala
cb3f59e7e7 Server URL in multus kubeconfig at /etc/cni/net.d/multus.d/multus.kubeconfig
is not properly set up for IPv6 installs

Fix:

If the API server host is an IPv6 address, then the server URL should
include square brackets to separate the IP address and port correctly
2019-12-10 09:09:37 +09:00
Shravan Vallala
a3712815fd Server URL in multus kubeconfig at
/etc/cni/net.d/multus.d/multus.kubeconfig is not properly set up for IPv6
installs

Fix:

If the API server host is an IPv6 address, then the server URL should
include square brackets to separate the IP address and port correctly
2019-12-10 09:09:37 +09:00
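The kubeconfig is generated by the entrypoint script, but the bracketed form is exactly what Go's standard library produces as well; a small sketch for reference:

```
package kubeconfig

import (
	"fmt"
	"net"
)

// apiServerURL builds the server URL for the generated kubeconfig.
// net.JoinHostPort wraps IPv6 literals in square brackets, producing
// e.g. "https://[fd00::1]:6443", while leaving IPv4 addresses untouched.
func apiServerURL(host, port string) string {
	return fmt.Sprintf("https://%s", net.JoinHostPort(host, port))
}
```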
Doug Smith
230f314877 Bump daemonset tag to v3.4 (#417) 2019-12-03 12:38:10 -08:00
Tomofumi Hayashi
a314f90dda Fix unit test to track on updated error message 2019-11-15 08:31:49 -05:00
dougbtv
ecc474a264 Skips binary copy in entrypoint with new parameter 2019-11-07 23:24:04 +08:00
Dan Williams
c7f957194b multus: print pod namespace/name in cmdAdd/cmdDel error messages
Signed-off-by: Dan Williams <dcbw@redhat.com>
2019-11-06 10:04:47 -05:00
dougbtv
8c76fd62e0 Adds cursory unit test for default-route and cleans lint (and one errant test) 2019-10-29 13:34:24 -04:00
dougbtv
3a9dd7ed76 Properly uses default-route in annotation to set the gateway. Fixes network status results. 2019-10-29 13:34:24 -04:00
Tomofumi Hayashi
165e23b72c Support gateway in NetworkSelectionElement
Changes config JSON from gateway to default-route, adds Readme, fixes lint

Co-authored-by: dougbtv <dosmith@redhat.com>
2019-10-29 13:34:24 -04:00
Michal Rostecki
84c348ce18 build: Allow to define VERSION and COMMIT without git
Previously the build script required git to be installed and used, which
did not allow building Multus from a tarball that doesn't contain a .git
directory. That made packaging Multus hard.

Example usage after the change if you do not want to use git:

```
$ VERSION=v3.3 COMMIT=ba33df ./build
```

Signed-off-by: Michal Rostecki <mrostecki@opensuse.org>
2019-10-29 09:06:44 -04:00
Tomofumi Hayashi
8bdb7104d7 Update k8s npwg repo name due to its change 2019-10-29 09:06:27 -04:00
Tim Rozet
3fbfe40e12 Fixes K8sNetworkPlumbingWG package capitalization
Migrates package to use capitalized name, also cleans up dependencies.

Signed-off-by: Tim Rozet <trozet@redhat.com>
2019-10-21 10:42:49 -04:00
Tim Rozet
e8baccff16 Removes duplicate NetworkAttachment CRD ref
The CRD is now defined in the NetworkPlumbingWG org, along with
generated libs for client, listers, informers, and deep copy functions.
Therefore remove the duplicate definition here, and use the standard
definition.

Signed-off-by: Tim Rozet <trozet@redhat.com>
2019-10-21 10:42:49 -04:00
Tomofumi Hayashi
2745e46ed8 Support 'cni-args' in NetworkSelectionElement 2019-10-21 21:33:07 +09:00
Tomofumi Hayashi
adec211ae1 Support CNI RuntimeConfig for portmap/bandwidth/ip/mac
This change introduces CNI RuntimeConfig for portmap, bandwidth,
ip and mac per the latest specification. IP and MAC were previously
applied through CNI_ARGS, but they have now been changed to
RuntimeConfig.
2019-10-16 09:20:11 -04:00
Tomofumi Hayashi
0a2f7b18d3 Improve error/debug message
This diff changes the error message to a fixed format that is easier
for users to read.
2019-10-10 10:32:45 -04:00
2278 changed files with 379946 additions and 178920 deletions

.github/workflows/build.yml (new file)

@@ -0,0 +1,24 @@
on: [push, pull_request]
name: Build
jobs:
build:
strategy:
matrix:
go-version: [1.16.x, 1.17.x]
goarch: [386, amd64, arm, arm64, ppc64le, s390x]
os: [ubuntu-latest] #, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
uses: actions/checkout@v2
- name: Build
env:
GOARCH: ${{ matrix.goarch }}
GOOS: ${{ matrix.goos }}
run: ./hack/build-go.sh

.github/workflows/image-build.yml (new file)

@@ -0,0 +1,110 @@
name: Image build
on: [pull_request]
jobs:
build-amd64:
name: Image build/amd64
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build container image
uses: docker/build-push-action@v2
with:
context: .
push: false
tags: ghcr.io/${{ github.repository }}:latest-amd64
file: deployments/Dockerfile
build-arm64:
name: Image build/arm64
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build container image
uses: docker/build-push-action@v2
with:
context: .
push: false
tags: ghcr.io/${{ github.repository }}:latest-arm64
file: deployments/Dockerfile.arm64
build-arm32:
name: Image build/arm32
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build container image
uses: docker/build-push-action@v2
with:
context: .
push: false
tags: ghcr.io/${{ github.repository }}:latest-arm32
file: deployments/Dockerfile.arm32
build-ppc64le:
name: Image build/ppc64le
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build container image
uses: docker/build-push-action@v2
with:
context: .
push: false
tags: ghcr.io/${{ github.repository }}:latest-ppc64le
file: deployments/Dockerfile.ppc64le
build-s390:
name: Image build/s390x
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build container image
uses: docker/build-push-action@v2
with:
context: .
push: false
tags: ghcr.io/${{ github.repository }}:latest-s390x
file: deployments/Dockerfile.s390x
build-origin:
name: Image build/origin
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build container image
uses: docker/build-push-action@v2
with:
context: .
push: false
tags: ghcr.io/${{ github.repository }}:latest-origin
file: deployments/Dockerfile.openshift

.github/workflows/image-push-master.yml (new file)

@@ -0,0 +1,225 @@
name: Image push for master
on:
push:
branches:
- master
jobs:
push-amd64:
name: Image push/amd64
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Push container image
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: |
ghcr.io/${{ github.repository }}:latest-amd64
ghcr.io/${{ github.repository }}:snapshot-amd64
file: deployments/Dockerfile
push-arm64:
name: Image push/arm64
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Push container image
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: |
ghcr.io/${{ github.repository }}:latest-arm64
ghcr.io/${{ github.repository }}:snapshot-arm64
file: deployments/Dockerfile.arm64
push-arm32:
name: Image push/arm32
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Push container image
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: |
ghcr.io/${{ github.repository }}:latest-arm32
ghcr.io/${{ github.repository }}:snapshot-arm32
file: deployments/Dockerfile.arm32
push-ppc64le:
name: Image push/ppc64le
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Push container image
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: |
ghcr.io/${{ github.repository }}:latest-ppc64le
ghcr.io/${{ github.repository }}:snapshot-ppc64le
file: deployments/Dockerfile.ppc64le
push-s390x:
name: Image push/s390x
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Push container image
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: |
ghcr.io/${{ github.repository }}:latest-s390x
ghcr.io/${{ github.repository }}:snapshot-s390x
file: deployments/Dockerfile.s390x
push-origin:
name: Image push/origin
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Push container image
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: |
ghcr.io/${{ github.repository }}:latest-origin
ghcr.io/${{ github.repository }}:snapshot-origin
file: deployments/Dockerfile.openshift
push-manifest:
needs: [push-amd64, push-arm64, push-ppc64le, push-s390x]
runs-on: ubuntu-latest
env:
REPOSITORY: ghcr.io/${{ github.repository }}
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create manifest for multi-arch images
if: github.repository_owner == 'k8snetworkplumbingwg'
run: |
# get artifacts from previous steps
docker pull ${{ env.REPOSITORY }}:snapshot-amd64
docker pull ${{ env.REPOSITORY }}:snapshot-arm64
docker pull ${{ env.REPOSITORY }}:snapshot-arm32
docker pull ${{ env.REPOSITORY }}:snapshot-ppc64le
docker pull ${{ env.REPOSITORY }}:snapshot-s390x
docker pull ${{ env.REPOSITORY }}:latest-amd64
docker pull ${{ env.REPOSITORY }}:latest-arm64
docker pull ${{ env.REPOSITORY }}:latest-arm32
docker pull ${{ env.REPOSITORY }}:latest-ppc64le
docker pull ${{ env.REPOSITORY }}:latest-s390x
docker manifest create ${{ env.REPOSITORY }}:snapshot ${{ env.REPOSITORY }}:snapshot-amd64 ${{ env.REPOSITORY }}:snapshot-arm64 ${{ env.REPOSITORY }}:snapshot-arm32 ${{ env.REPOSITORY }}:snapshot-ppc64le ${{ env.REPOSITORY }}:snapshot-s390x
docker manifest annotate ${{ env.REPOSITORY }}:snapshot ${{ env.REPOSITORY }}:snapshot-amd64 --arch amd64
docker manifest annotate ${{ env.REPOSITORY }}:snapshot ${{ env.REPOSITORY }}:snapshot-arm64 --arch arm64
docker manifest annotate ${{ env.REPOSITORY }}:snapshot ${{ env.REPOSITORY }}:snapshot-arm32 --arch arm
docker manifest annotate ${{ env.REPOSITORY }}:snapshot ${{ env.REPOSITORY }}:snapshot-ppc64le --arch ppc64le
docker manifest annotate ${{ env.REPOSITORY }}:snapshot ${{ env.REPOSITORY }}:snapshot-s390x --arch s390x
docker manifest push ${{ env.REPOSITORY }}:snapshot
docker manifest create ${{ env.REPOSITORY }}:latest ${{ env.REPOSITORY }}:latest-amd64 ${{ env.REPOSITORY }}:latest-arm64 ${{ env.REPOSITORY }}:latest-arm32 ${{ env.REPOSITORY }}:latest-ppc64le ${{ env.REPOSITORY }}:latest-s390x
docker manifest annotate ${{ env.REPOSITORY }}:latest ${{ env.REPOSITORY }}:latest-amd64 --arch amd64
docker manifest annotate ${{ env.REPOSITORY }}:latest ${{ env.REPOSITORY }}:latest-arm64 --arch arm64
docker manifest annotate ${{ env.REPOSITORY }}:latest ${{ env.REPOSITORY }}:latest-arm32 --arch arm
docker manifest annotate ${{ env.REPOSITORY }}:latest ${{ env.REPOSITORY }}:latest-ppc64le --arch ppc64le
docker manifest annotate ${{ env.REPOSITORY }}:latest ${{ env.REPOSITORY }}:latest-s390x --arch s390x
docker manifest push ${{ env.REPOSITORY }}:latest

.github/workflows/image-push-release.yml (new file)

@@ -0,0 +1,274 @@
name: Image push release
on:
push:
tags:
- v*
jobs:
push-amd64:
name: Image push/amd64
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker meta
id: docker_meta
uses: crazy-max/ghaction-docker-meta@v1
with:
images: ghcr.io/${{ github.repository }}
tag-latest: false
- name: Push container image
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: |
ghcr.io/${{ github.repository }}:stable-amd64
${{ steps.docker_meta.outputs.tags }}-amd64
file: deployments/Dockerfile
push-arm64:
name: Image push/arm64
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker meta
id: docker_meta
uses: crazy-max/ghaction-docker-meta@v1
with:
images: ghcr.io/${{ github.repository }}
tag-latest: false
- name: Push container image
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: |
ghcr.io/${{ github.repository }}:stable-arm64
${{ steps.docker_meta.outputs.tags }}-arm64
file: deployments/Dockerfile.arm64
push-arm32:
name: Image push/arm32
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker meta
id: docker_meta
uses: crazy-max/ghaction-docker-meta@v1
with:
images: ghcr.io/${{ github.repository }}
tag-latest: false
- name: Push container image
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: |
ghcr.io/${{ github.repository }}:stable-arm32
${{ steps.docker_meta.outputs.tags }}-arm32
file: deployments/Dockerfile.arm32
push-ppc64le:
name: Image push/ppc64le
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker meta
id: docker_meta
uses: crazy-max/ghaction-docker-meta@v1
with:
images: ghcr.io/${{ github.repository }}
tag-latest: false
- name: Push container image
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: |
ghcr.io/${{ github.repository }}:stable-ppc64le
${{ steps.docker_meta.outputs.tags }}-ppc64le
file: deployments/Dockerfile.ppc64le
push-s390x:
name: Image push/s390x
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker meta
id: docker_meta
uses: crazy-max/ghaction-docker-meta@v1
with:
images: ghcr.io/${{ github.repository }}
tag-latest: false
- name: Push container image
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: |
ghcr.io/${{ github.repository }}:stable-s390x
${{ steps.docker_meta.outputs.tags }}-s390x
file: deployments/Dockerfile.s390x
push-origin:
name: Image push/origin
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker meta
id: docker_meta
uses: crazy-max/ghaction-docker-meta@v1
with:
images: ghcr.io/${{ github.repository }}
tag-latest: false
- name: Push container image
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: |
ghcr.io/${{ github.repository }}:stable-origin
${{ steps.docker_meta.outputs.tags }}-origin
file: deployments/Dockerfile.openshift
push-manifest:
needs: [push-amd64, push-arm64, push-ppc64le, push-s390x]
runs-on: ubuntu-latest
env:
REPOSITORY: ghcr.io/${{ github.repository }}
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Docker meta
id: docker_meta
uses: crazy-max/ghaction-docker-meta@v1
with:
images: ghcr.io/${{ github.repository }}
tag-latest: false
- name: Login to GitHub Container Registry
if: github.repository_owner == 'k8snetworkplumbingwg'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create manifest for multi-arch images
if: github.repository_owner == 'k8snetworkplumbingwg'
run: |
# get artifacts from previous steps
docker pull ${{ steps.docker_meta.outputs.tags }}-amd64
docker pull ${{ steps.docker_meta.outputs.tags }}-arm64
docker pull ${{ steps.docker_meta.outputs.tags }}-arm32
docker pull ${{ steps.docker_meta.outputs.tags }}-ppc64le
docker pull ${{ steps.docker_meta.outputs.tags }}-s390x
docker manifest create ${{ steps.docker_meta.outputs.tags }} ${{ steps.docker_meta.outputs.tags }}-amd64 ${{ steps.docker_meta.outputs.tags }}-arm64 ${{ steps.docker_meta.outputs.tags }}-arm32 ${{ steps.docker_meta.outputs.tags }}-ppc64le ${{ steps.docker_meta.outputs.tags }}-s390x
docker manifest annotate ${{ steps.docker_meta.outputs.tags }} ${{ steps.docker_meta.outputs.tags }}-amd64 --arch amd64
docker manifest annotate ${{ steps.docker_meta.outputs.tags }} ${{ steps.docker_meta.outputs.tags }}-arm64 --arch arm64
docker manifest annotate ${{ steps.docker_meta.outputs.tags }} ${{ steps.docker_meta.outputs.tags }}-arm32 --arch arm
docker manifest annotate ${{ steps.docker_meta.outputs.tags }} ${{ steps.docker_meta.outputs.tags }}-ppc64le --arch ppc64le
docker manifest annotate ${{ steps.docker_meta.outputs.tags }} ${{ steps.docker_meta.outputs.tags }}-s390x --arch s390x
docker manifest push ${{ steps.docker_meta.outputs.tags }}
docker pull ${{ env.REPOSITORY }}:stable-amd64
docker pull ${{ env.REPOSITORY }}:stable-arm64
docker pull ${{ env.REPOSITORY }}:stable-arm32
docker pull ${{ env.REPOSITORY }}:stable-ppc64le
docker pull ${{ env.REPOSITORY }}:stable-s390x
docker manifest create ${{ env.REPOSITORY }}:stable ${{ env.REPOSITORY }}:stable-amd64 ${{ env.REPOSITORY }}:stable-arm64 ${{ env.REPOSITORY }}:stable-arm32 ${{ env.REPOSITORY }}:stable-ppc64le ${{ env.REPOSITORY }}:stable-s390x
docker manifest annotate ${{ env.REPOSITORY }}:stable ${{ env.REPOSITORY }}:stable-amd64 --arch amd64
docker manifest annotate ${{ env.REPOSITORY }}:stable ${{ env.REPOSITORY }}:stable-arm64 --arch arm64
docker manifest annotate ${{ env.REPOSITORY }}:stable ${{ env.REPOSITORY }}:stable-arm32 --arch arm
docker manifest annotate ${{ env.REPOSITORY }}:stable ${{ env.REPOSITORY }}:stable-ppc64le --arch ppc64le
docker manifest annotate ${{ env.REPOSITORY }}:stable ${{ env.REPOSITORY }}:stable-s390x --arch s390x
docker manifest push ${{ env.REPOSITORY }}:stable

.github/workflows/kind-e2e.yml (new file)

@@ -0,0 +1,46 @@
name: e2e-kind
on: [push, pull_request]
jobs:
e2e-kind:
runs-on: ubuntu-latest
if: >
(( github.event.pull_request.head.repo.owner.login != github.event.pull_request.base.repo.owner.login ) &&
github.event_name == 'pull_request' ) || (github.event_name == 'push' && github.event.commits != '[]' )
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Setup registry
run: docker run -d --restart=always -p "5000:5000" --name "kind-registry" registry:2
- name: Build latest-amd64
run: docker build -t localhost:5000/multus:e2e -f deployments/Dockerfile .
- name: Push to local registry
run: docker push localhost:5000/multus:e2e
- name: Get kind/kubectl/koko
working-directory: ./e2e
run: ./get_tools.sh
- name: Setup cluster
working-directory: ./e2e
run: ./setup_cluster.sh
- name: Test simple pod
working-directory: ./e2e
run: ./test-simple-pod.sh
- name: Test macvlan1
working-directory: ./e2e
run: ./test-simple-macvlan1.sh
- name: Test default route1
working-directory: ./e2e
run: ./test-default-route1.sh
- name: cleanup cluster and registry
run: |
kind delete cluster
docker kill kind-registry
docker rm kind-registry

26
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,26 @@
name: Release binaries
on:
push:
tags:
- 'v*'
jobs:
goreleaser:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.15.x
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
with:
version: latest
args: release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

15
.github/workflows/stale-issues-prs.yml vendored Normal file
View File

@@ -0,0 +1,15 @@
name: 'Close stale issues and PRs'
on:
schedule:
- cron: '30 1 * * *'
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v3
with:
stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 7 days.'
stale-pr-message: 'This pull request is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 7 days.'
days-before-stale: 90
days-before-close: 7

49
.github/workflows/test.yml vendored Normal file
View File

@@ -0,0 +1,49 @@
on: [push, pull_request]
name: Test
jobs:
test:
strategy:
matrix:
go-version: [1.16.x, 1.17.x]
os: [ubuntu-latest]
runs-on: ${{ matrix.os }}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
uses: actions/checkout@v2
- name: Run Revive
run: |
GO111MODULE=off go get github.com/mgechev/revive
$(go env GOPATH)/bin/revive -exclude ./vendor/... ./... # this is output for the user
$(go env GOPATH)/bin/revive -exclude ./vendor/... ./...| xargs -0 -r false # this is for github actions
- name: Run go fmt
run: go fmt ./...
#run: diff -u <(echo -n) <(gofmt -d -s .)
- name: Run go vet
run: go vet ./...
- name: Test
run: sudo ./hack/test-go.sh
- name: Send coverage
uses: shogo82148/actions-goveralls@v1
with:
path-to-profile: coverage.out
flag-name: Go-${{ matrix.go }}
parallel: true
# notifies that all test jobs are finished.
finish:
needs: test
runs-on: ubuntu-latest
steps:
- uses: shogo82148/actions-goveralls@v1
with:
parallel-finished: true

6
.gitignore vendored
View File

@@ -1,9 +1,15 @@
# Binary output dir
bin/
e2e/bin/
# GOPATH created by the build script
gopath/
# Editor paths
.swp*
.swo*
.idea*
# Test outputs
*.out
*.test

View File

@@ -9,7 +9,7 @@ builds:
-
env:
- CGO_ENABLED=0
main: ./multus/
main: ./cmd/
goos:
- linux
goarch:
@@ -17,8 +17,9 @@ builds:
- amd64
- arm
- arm64
archive:
wrap_in_directory: true
- s390x
archives:
- wrap_in_directory: true
checksum:
name_template: 'checksums.txt'
snapshot:

View File

@@ -1,23 +1,31 @@
os: linux
language: go
# see https://docs.travis-ci.com/user/reference/overview/#Virtualization-environments
# for the detail
# sudo: required
dist: trusty
dist: bionic
services:
- docker
go:
- 1.12.x
- 1.13.x
env:
global:
- GO111MODULE=on
- REGISTRY_USER=${REGISTRY_USER}
- REGISTRY_USER=${REGISTRY_USER:-nfvpe}
- REGISTRY_PASS=${REGISTRY_PASS}
- REPOSITORY_NAME=${REPOSITORY_NAME}
- REPOSITORY_USER=${REPOSITORY_USER}
- DOCKER_CLI_EXPERIMENTAL="enabled"
- secure: "${REGISTRY_SECURE}"
matrix:
jobs:
- TARGET=amd64
- TARGET=ppc64le
before_install:
- if [ "${REPOSITORY_NAME}" = "" ]; then export REPOSITORY_NAME=multus; fi
- sudo apt-get update -qq
- go get github.com/mattn/goveralls
@@ -33,73 +41,76 @@ before_script:
# - gocyclo -over 15 ./multus
script:
- GOARCH="${TARGET}" ./build
- GOARCH="${TARGET}" ./hack/build-go.sh
- |
if [ "${TARGET}" == "amd64" ]; then
sudo env PATH=${PATH} ./test.sh
sudo env PATH=${PATH} ./scripts/test.sh
goveralls -coverprofile=coverage.out -service=travis-ci
mkdir -p ${TRAVIS_BUILD_DIR}/dist
tar cvfz ${TRAVIS_BUILD_DIR}/dist/multus-cni_amd64.tar.gz --warning=no-file-changed --exclude="dist" .
docker build -t nfvpe/multus:latest-amd64 .
docker build -t nfvpe/multus:latest-ppc64le -f Dockerfile.ppc64le .
docker build -t nfvpe/multus-origin:latest -f Dockerfile.openshift .
docker build -t ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-amd64 .
docker build -t ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-ppc64le -f Dockerfile.ppc64le .
docker build -t ${REPOSITORY_USER}/${REPOSITORY_NAME}-origin:latest -f Dockerfile.openshift .
fi
deploy:
# Release on versioned tag (e.g. v1.0)
- provider: script
#skip_cleanup: true
script: curl -sL https://git.io/goreleaser | bash
#cleanup: false
script: curl -sL https://git.io/goreleaser
on:
tags: true
all_branches: true
condition: "$TRAVIS_TAG =~ ^v[0-9].*$"
condition: "$TARGET = amd64 && $TRAVIS_TAG =~ ^v[0-9].*$ && ! -z $GITHUB_TOKEN && $TRAVIS_OS_NAME = linux"
# Push images to Dockerhub on tag
- provider: script
skip_cleanup: true
cleanup: false
script: >
bash -c '
docker tag nfvpe/multus:latest-amd64 nfvpe/multus:latest;
docker tag nfvpe/multus:latest-amd64 nfvpe/multus:stable;
docker tag nfvpe/multus:latest-amd64 nfvpe/multus:stable-amd64;
docker tag nfvpe/multus:latest-amd64 nfvpe/multus:$TRAVIS_TAG;
docker tag nfvpe/multus:latest-ppc64le nfvpe/multus:stable-ppc64le;
docker tag ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-amd64 ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest;
docker tag ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-amd64 ${REPOSITORY_USER}/${REPOSITORY_NAME}:stable;
docker tag ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-amd64 ${REPOSITORY_USER}/${REPOSITORY_NAME}:stable-amd64;
docker tag ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-amd64 ${REPOSITORY_USER}/${REPOSITORY_NAME}:$TRAVIS_TAG;
docker tag ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-ppc64le ${REPOSITORY_USER}/${REPOSITORY_NAME}:stable-ppc64le;
docker login -u "$REGISTRY_USER" -p "$REGISTRY_PASS";
docker push nfvpe/multus:latest;
docker push nfvpe/multus:latest-amd64;
docker push nfvpe/multus:latest-ppc64le;
docker push nfvpe/multus:stable;
docker push nfvpe/multus:stable-amd64;
docker push nfvpe/multus:stable-ppc64le;
docker push nfvpe/multus:$TRAVIS_TAG;
docker push ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-amd64;
docker push ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-ppc64le;
docker push ${REPOSITORY_USER}/${REPOSITORY_NAME}:stable-amd64;
docker push ${REPOSITORY_USER}/${REPOSITORY_NAME}:stable-ppc64le;
docker push ${REPOSITORY_USER}/${REPOSITORY_NAME}:$TRAVIS_TAG;
export DOCKER_CLI_EXPERIMENTAL="enabled";
docker manifest create ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-amd64 ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-ppc64le;
docker manifest annotate ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-amd64 --arch amd64;
docker manifest annotate ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-ppc64le --arch ppc64le;
docker manifest push ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest;
docker manifest create ${REPOSITORY_USER}/${REPOSITORY_NAME}:stable ${REPOSITORY_USER}/${REPOSITORY_NAME}:stable-amd64 ${REPOSITORY_USER}/${REPOSITORY_NAME}:stable-ppc64le;
docker manifest annotate ${REPOSITORY_USER}/${REPOSITORY_NAME}:stable ${REPOSITORY_USER}/${REPOSITORY_NAME}:stable-amd64 --arch amd64;
docker manifest annotate ${REPOSITORY_USER}/${REPOSITORY_NAME}:stable ${REPOSITORY_USER}/${REPOSITORY_NAME}:stable-ppc64le --arch ppc64le;
docker manifest push ${REPOSITORY_USER}/${REPOSITORY_NAME}:stable;
echo done'
on:
tags: true
all_branches: true
condition: "$TRAVIS_TAG =~ ^v[0-9].*$"
condition: "$TRAVIS_TAG =~ ^v[0-9].*$ && -n $REGISTRY_USER && -n $REGISTRY_PASS && -n $REPOSITORY_NAME && -n $REPOSITORY_USER"
# Push images to Dockerhub on merge to master
- provider: script
on:
branch: master
condition: "-n $REGISTRY_USER && -n $REGISTRY_PASS && -n $REPOSITORY_NAME && -n $REPOSITORY_USER"
script: >
bash -c '
docker tag nfvpe/multus:latest-amd64 nfvpe/multus:snapshot;
docker tag nfvpe/multus:latest-amd64 nfvpe/multus:snapshot-amd64;
docker tag nfvpe/multus:latest-ppc64le nfvpe/multus:snapshot-ppc64le;
docker tag ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-amd64 ${REPOSITORY_USER}/${REPOSITORY_NAME}:snapshot;
docker tag ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-amd64 ${REPOSITORY_USER}/${REPOSITORY_NAME}:snapshot-amd64;
docker tag ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-ppc64le ${REPOSITORY_USER}/${REPOSITORY_NAME}:snapshot-ppc64le;
docker login -u "$REGISTRY_USER" -p "$REGISTRY_PASS";
docker push nfvpe/multus:snapshot;
docker push nfvpe/multus:snapshot-amd64;
docker push nfvpe/multus:snapshot-ppc64le;
docker push nfvpe/multus:latest;
docker push nfvpe/multus:latest-amd64;
docker push nfvpe/multus:latest-ppc64le;
docker push ${REPOSITORY_USER}/${REPOSITORY_NAME}:snapshot-amd64;
docker push ${REPOSITORY_USER}/${REPOSITORY_NAME}:snapshot-ppc64le;
docker push ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-amd64;
docker push ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-ppc64le;
docker manifest create ${REPOSITORY_USER}/${REPOSITORY_NAME}:snapshot ${REPOSITORY_USER}/${REPOSITORY_NAME}:snapshot-amd64 ${REPOSITORY_USER}/${REPOSITORY_NAME}:snapshot-ppc64le;
docker manifest annotate ${REPOSITORY_USER}/${REPOSITORY_NAME}:snapshot ${REPOSITORY_USER}/${REPOSITORY_NAME}:snapshot-amd64 --arch amd64;
docker manifest annotate ${REPOSITORY_USER}/${REPOSITORY_NAME}:snapshot ${REPOSITORY_USER}/${REPOSITORY_NAME}:snapshot-ppc64le --arch ppc64le;
docker manifest push ${REPOSITORY_USER}/${REPOSITORY_NAME}:snapshot;
docker manifest create ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-amd64 ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-ppc64le;
docker manifest annotate ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-amd64 --arch amd64;
docker manifest annotate ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest-ppc64le --arch ppc64le;
docker manifest push ${REPOSITORY_USER}/${REPOSITORY_NAME}:latest;
echo done'
after_success:
# put build tgz to bintray
- curl -T ${TRAVIS_BUILD_DIR}/dist/multus-cni_amd64.tar.gz -u${BINTRAY_USER}:${BINTRAY_APIKEY} https://api.bintray.com/content/redhat-nfvpe/multus-cni-crd-snapshots/snapshot/snapshot-${TRAVIS_COMMIT}/multus-cni_amd64-${TRAVIS_COMMIT}.tar.gz
# publish uploaded file
- curl -X POST -u${BINTRAY_USER}:${BINTRAY_APIKEY} https://api.bintray.com/content/redhat-nfvpe/multus-cni-crd-snapshots/snapshot/snapshot-${TRAVIS_COMMIT}/publish
# put it in bintray download list
- sleep 20
- "curl -X PUT -H 'Accept: application/json' -H 'Content-type: application/json' -u${BINTRAY_USER}:${BINTRAY_APIKEY} https://api.bintray.com/file_metadata/redhat-nfvpe/multus-cni-crd-snapshots/multus-cni_amd64-${TRAVIS_COMMIT}.tar.gz -d '{\"list_in_downloads\":true}'"

130
CODE_OF_CONDUCT.md Normal file
View File

@@ -0,0 +1,130 @@
# Multus CNI Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
[The Multus Slack Page](https://intel-corp.herokuapp.com/).
All complaints will be reviewed and investigated promptly and fairly. Or you
may specifically contact Doug Smith (dosmith@redhat.com) via email.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

View File

@@ -1,20 +0,0 @@
# This Dockerfile is used to build the image available on DockerHub
FROM centos:centos7 as build
# Add everything
ADD . /usr/src/multus-cni
ENV INSTALL_PKGS "git golang"
RUN rpm --import https://mirror.go-repo.io/centos/RPM-GPG-KEY-GO-REPO && \
curl -s https://mirror.go-repo.io/centos/go-repo.repo | tee /etc/yum.repos.d/go-repo.repo && \
yum install -y $INSTALL_PKGS && \
rpm -V $INSTALL_PKGS && \
cd /usr/src/multus-cni && \
./build
FROM centos:centos7
COPY --from=build /usr/src/multus-cni /usr/src/multus-cni
WORKDIR /
ADD ./images/entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh"]

View File

@@ -1,25 +0,0 @@
# This Dockerfile is used to build the image available on DockerHub
FROM centos:centos7 as build
# Add everything
ADD . /usr/src/multus-cni
ENV GOARCH "ppc64le"
ENV GOOS "linux"
ENV INSTALL_PKGS "git golang"
RUN rpm --import https://mirror.go-repo.io/centos/RPM-GPG-KEY-GO-REPO && \
curl -s https://mirror.go-repo.io/centos/go-repo.repo | tee /etc/yum.repos.d/go-repo.repo && \
yum install -y $INSTALL_PKGS && \
rpm -V $INSTALL_PKGS && \
cd /usr/src/multus-cni && \
./build
# build ppc container
FROM ppc64le/centos:latest
COPY --from=build /usr/src/multus-cni /usr/src/multus-cni
WORKDIR /
ADD ./images/entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh"]

View File

@@ -1,8 +1,8 @@
# Multus-CNI
![multus-cni Logo](https://github.com/intel/multus-cni/blob/master/doc/images/Multus.png)
![multus-cni Logo](https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/images/Multus.png)
[![Travis CI](https://travis-ci.org/intel/multus-cni.svg?branch=master)](https://travis-ci.org/intel/multus-cni/builds)[![Go Report Card](https://goreportcard.com/badge/github.com/intel/multus-cni)](https://goreportcard.com/report/github.com/intel/multus-cni)[![Coverage Status](https://coveralls.io/repos/github/intel/multus-cni/badge.svg)](https://coveralls.io/github/intel/multus-cni)
[![Build](https://github.com/k8snetworkplumbingwg/multus-cni/actions/workflows/build.yml/badge.svg)](https://github.com/k8snetworkplumbingwg/multus-cni/actions/workflows/build.yml)[![Test](https://github.com/k8snetworkplumbingwg/multus-cni/actions/workflows/test.yml/badge.svg)](https://github.com/k8snetworkplumbingwg/multus-cni/actions/workflows/test.yml)[![Go Report Card](https://goreportcard.com/badge/github.com/k8snetworkplumbingwg/multus-cni)](https://goreportcard.com/report/github.com/k8snetworkplumbingwg/multus-cni)[![Coverage Status](https://coveralls.io/repos/github/k8snetworkplumbingwg/multus-cni/badge.svg)](https://coveralls.io/github/k8snetworkplumbingwg/multus-cni)
Multus CNI enables attaching multiple network interfaces to pods in Kubernetes.
@@ -18,11 +18,11 @@ Multus is one of the projects in the [Baremetal Container Experience kit](https:
Here's an illustration of the network interfaces attached to a pod, as provisioned by Multus CNI. The diagram shows the pod with three interfaces: `eth0`, `net0` and `net1`. `eth0` connects to the Kubernetes cluster network for communication with Kubernetes servers/services (e.g. the Kubernetes api-server, kubelet and so on). `net0` and `net1` are additional network attachments and connect to other networks by using [other CNI plugins](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) (e.g. vlan/vxlan/ptp).
![multus-pod-image](doc/images/multus-pod-image.svg)
![multus-pod-image](docs/images/multus-pod-image.svg)
## Quickstart Installation Guide
The quickstart installation method for Multus requires that you have first installed a Kubernetes CNI plugin to serve as your pod-to-pod network, which we refer to as your "default network" (a network interface that every pod will be creatd with). Each network attachment created by Multus will be in addition to this default network interface. For more detail on installing a default network CNI plugins, refer to our [quick-start guide](doc/quickstart.md).
The quickstart installation method for Multus requires that you have first installed a Kubernetes CNI plugin to serve as your pod-to-pod network, which we refer to as your "default network" (a network interface that every pod will be created with). Each network attachment created by Multus will be in addition to this default network interface. For more detail on installing a default network CNI plugin, refer to our [quick-start guide](docs/quickstart.md).
Clone this GitHub repository; we'll apply a daemonset which installs Multus using `kubectl` from this repo. From the root directory of the clone, apply the daemonset YAML file:
@@ -30,22 +30,22 @@ Clone this GitHub repository, we'll apply a daemonset which installs Multus usin
$ cat ./images/multus-daemonset.yml | kubectl apply -f -
```
This will configure your systems to be ready to use Multus CNI, but, to get started with adding additional interfaces to your pods, refer to our complete [quick-start guide](doc/quickstart.md)
This will configure your systems to be ready to use Multus CNI, but, to get started with adding additional interfaces to your pods, refer to our complete [quick-start guide](docs/quickstart.md)
## Additional installation Options
- Install via daemonset using the quick-start guide, above.
- Download binaries from [release page](https://github.com/intel/multus-cni/releases)
- Download binaries from [release page](https://github.com/k8snetworkplumbingwg/multus-cni/releases)
- By Docker image from [Docker Hub](https://hub.docker.com/r/nfvpe/multus/tags/)
- Or, roll-you-own and build from source
- See [Development](doc/development.md)
- Or, roll-your-own and build from source
- See [Development](docs/development.md)
## Comprehensive Documentation
- [How to use](doc/how-to-use.md)
- [Configuration](doc/configuration.md)
- [Development](doc/development.md)
- [How to use](docs/how-to-use.md)
- [Configuration](docs/configuration.md)
- [Development](docs/development.md)
## Contact Us
For any questions about Multus CNI, feel free to ask a question in #general in the [Intel-Corp Slack](https://intel-corp.herokuapp.com/), or open up a GitHub issue.
For any questions about Multus CNI, feel free to ask a question in #general in the [NPWG Slack](https://npwg-team.slack.com/), or open up a GitHub issue. Request an invite to NPWG slack [here](https://intel-corp.herokuapp.com/).

59
cmd/main.go Normal file
View File

@@ -0,0 +1,59 @@
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This is a "Multi-plugin".The delegate concept referred from CNI project
// It reads other plugin netconf, and then invoke them, e.g.
// flannel or sriov plugin.
package main
import (
"flag"
"fmt"
"os"
"github.com/containernetworking/cni/pkg/skel"
cniversion "github.com/containernetworking/cni/pkg/version"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/multus"
)
func main() {
// Reinitialize the command line FlagSet to clear flags registered by vendored packages, especially in their init() functions
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
// add version flag
versionOpt := false
flag.BoolVar(&versionOpt, "version", false, "Show application version")
flag.BoolVar(&versionOpt, "v", false, "Show application version")
flag.Parse()
if versionOpt == true {
fmt.Printf("%s\n", multus.PrintVersionString())
return
}
skel.PluginMain(
func(args *skel.CmdArgs) error {
result, err := multus.CmdAdd(args, nil, nil)
if err != nil {
return err
}
return result.Print()
},
func(args *skel.CmdArgs) error {
return multus.CmdCheck(args, nil, nil)
},
func(args *skel.CmdArgs) error { return multus.CmdDel(args, nil, nil) },
cniversion.All, "meta-plugin that delegates to other CNI plugins")
}

16
deployments/Dockerfile Normal file
View File

@@ -0,0 +1,16 @@
# This Dockerfile is used to build the image available on DockerHub
FROM golang:1.17.1 as build
# Add everything
ADD . /usr/src/multus-cni
RUN cd /usr/src/multus-cni && \
./hack/build-go.sh
FROM centos:centos7
LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/multus-cni
COPY --from=build /usr/src/multus-cni /usr/src/multus-cni
WORKDIR /
ADD ./images/entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh"]

View File

@@ -0,0 +1,21 @@
# This Dockerfile is used to build the image available on DockerHub
FROM golang:1.17.1 as build
# Add everything
ADD . /usr/src/multus-cni
ENV GOARCH "arm"
ENV GOOS "linux"
RUN cd /usr/src/multus-cni && \
./hack/build-go.sh
# build arm container
FROM arm32v7/centos:7
LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/multus-cni
COPY --from=build /usr/src/multus-cni /usr/src/multus-cni
WORKDIR /
ADD ./images/entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh"]

View File

@@ -0,0 +1,21 @@
# This Dockerfile is used to build the image available on DockerHub
FROM golang:1.17.1 as build
# Add everything
ADD . /usr/src/multus-cni
ENV GOARCH "arm64"
ENV GOOS "linux"
RUN cd /usr/src/multus-cni && \
./hack/build-go.sh
# build arm64 container
FROM arm64v8/centos:7
LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/multus-cni
COPY --from=build /usr/src/multus-cni /usr/src/multus-cni
WORKDIR /
ADD ./images/entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh"]

View File

@@ -1,13 +1,14 @@
# This dockerfile is specific to building Multus for OpenShift
FROM openshift/origin-release:golang-1.10 as builder
FROM openshift/origin-release:golang-1.16 as builder
ADD . /usr/src/multus-cni
WORKDIR /usr/src/multus-cni
ENV GO111MODULE=off
RUN ./build
RUN ./hack/build-go.sh
FROM openshift/origin-base
LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/multus-cni
RUN mkdir -p /usr/src/multus-cni/images && mkdir -p /usr/src/multus-cni/bin
COPY --from=builder /usr/src/multus-cni/bin/multus /usr/src/multus-cni/bin
ADD ./images/entrypoint.sh /

View File

@@ -0,0 +1,21 @@
# This Dockerfile is used to build the image available on DockerHub
FROM golang:1.17.1 as build
# Add everything
ADD . /usr/src/multus-cni
ENV GOARCH "ppc64le"
ENV GOOS "linux"
RUN cd /usr/src/multus-cni && \
./hack/build-go.sh
# build ppc container
FROM ppc64le/centos:latest
LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/multus-cni
COPY --from=build /usr/src/multus-cni /usr/src/multus-cni
WORKDIR /
ADD ./images/entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh"]

View File

@@ -0,0 +1,20 @@
# This Dockerfile is used to build the image available on DockerHub
FROM golang:1.17 as build
# Add everything
ADD . /usr/src/multus-cni
ENV GOARCH "s390x"
ENV GOOS "linux"
RUN cd /usr/src/multus-cni && \
./hack/build-go.sh
# build s390x container
FROM s390x/python:3-slim
LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/multus-cni
COPY --from=build /usr/src/multus-cni /usr/src/multus-cni
WORKDIR /
ADD ./images/entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh"]

View File

@@ -1,30 +0,0 @@
## Development Information
## How to build the multus-cni?
```
git clone https://github.com/intel/multus-cni.git
cd multus-cni
./build
```
## How to run CI tests?
Multus has go unit tests (based on ginkgo framework).The following commands drive CI tests manually in your environment:
```
sudo ./test.sh
```
## Logging Best Practices
Following are multus logging best practices:
* Add `logging.Debugf()` at the begining of functions
* In case of error handling, use `logging.Errorf()` with given error info
* `logging.Panicf()` only be used for critical errors (it should NOT normally be used)
## CI Introduction
TBD

View File

@@ -5,6 +5,7 @@ Following is the example of multus config file, in `/etc/cni/net.d/`.
```
{
"cniVersion": "0.3.1",
"name": "node-cni-network",
"type": "multus",
"kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
@@ -39,17 +40,18 @@ Following is the example of multus config file, in `/etc/cni/net.d/`.
* `confDir` (string, optional): directory for CNI config file that multus reads. default `/etc/cni/multus/net.d`
* `cniDir` (string, optional): Multus CNI data directory, default `/var/lib/cni/multus`
* `binDir` (string, optional): additional directory for CNI plugins which multus calls, in addition to the default (the default is typically set to `/opt/cni/bin`)
* `kubeconfig` (string, optional): kubeconfig file for the out of cluster communication with kube-apiserver. See the example [kubeconfig](https://github.com/intel/multus-cni/blob/master/doc/node-kubeconfig.yaml). If you would like to use CRD (i.e. network attachment definition), this is required
* `kubeconfig` (string, optional): kubeconfig file for the out of cluster communication with kube-apiserver. See the example [kubeconfig](https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/node-kubeconfig.yaml). If you would like to use CRD (i.e. network attachment definition), this is required
* `logToStderr` (bool, optional): Enable or disable logging to `STDERR`. Defaults to true.
* `logFile` (string, optional): file path for log file. multus puts log in given file
* `logLevel` (string, optional): logging level ("debug", "error", "verbose", or "panic")
* `namespaceIsolation` (boolean, optional): Enables a security feature where pods are only allowed to access `NetworkAttachmentDefinitions` in the namespace where the pod resides. Defaults to false.
* `capabilities` ({}list, optional): [capabilities](https://github.com/containernetworking/cni/blob/master/CONVENTIONS.md#dynamic-plugin-specific-fields-capabilities--runtime-configuration) supported by at least one of the delegates. (NOTE: Multus only supports portMappings capability for now). See the [example](https://github.com/intel/multus-cni/blob/master/examples/multus-ptp-portmap.conf).
* `readinessindicatorfile`: The path to a file whose existance denotes that the default network is ready
* `capabilities` ({}list, optional): [capabilities](https://github.com/containernetworking/cni/blob/master/CONVENTIONS.md#dynamic-plugin-specific-fields-capabilities--runtime-configuration) supported by at least one of the delegates. (NOTE: Multus only supports portMappings/Bandwidth capability for cluster networks).
* `readinessindicatorfile`: The path to a file whose existence denotes that the default network is ready
Users should choose one of the following parameter combinations (`clusterNetwork`+`defaultNetworks` or `delegates`):
* `clusterNetwork` (string, required): default CNI network for pods, used in kubernetes cluster (Pod IP and so on): name of network-attachment-definition, CNI json file name (without extention, .conf/.conflist) or directory for CNI config file
* `defaultNetworks` ([]string, required): default CNI network attachment: name of network-attachment-definition, CNI json file name (without extention, .conf/.conflist) or directory for CNI config file
* `clusterNetwork` (string, required): default CNI network for pods, used in kubernetes cluster (Pod IP and so on): name of network-attachment-definition, CNI json file name (without extension, .conf/.conflist) or directory for CNI config file
* `defaultNetworks` ([]string, required): default CNI network attachment: name of network-attachment-definition, CNI json file name (without extension, .conf/.conflist) or directory for CNI config file
* `systemNamespaces` ([]string, optional): list of namespaces for Kubernetes system (namespaces listed here will not have `defaultNetworks` added)
* `multusNamespace` (string, optional): namespace for `clusterNetwork`/`defaultNetworks`
* `delegates` ([]map,required): number of delegate details in the Multus
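For illustration, here is a minimal sketch of a Multus configuration using the `clusterNetwork`+`defaultNetworks` combination; the network-attachment-definition names (`defaultCRD`, `sidecarCRD`) are placeholders, not values taken from this repository:
```
{
  "cniVersion": "0.3.1",
  "name": "node-cni-network",
  "type": "multus",
  "kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
  "clusterNetwork": "defaultCRD",
  "defaultNetworks": ["sidecarCRD"],
  "systemNamespaces": ["kube-system"],
  "multusNamespace": "kube-system"
}
```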
@@ -59,7 +61,7 @@ User should chose following parameters combination (`clusterNetwork`+`defaultNet
Multus will look up the network for `clusterNetwork`/`defaultNetworks` in the following sequence:
1. CRD object for given network name, in 'kube-system' namespace
1. CNI json config file in `confDir`. Given name should be without extention, like .conf/.conflist. (e.g. "test" for "test.conf")
1. CNI json config file in `confDir`. Given name should be without extension, like .conf/.conflist. (e.g. "test" for "test.conf"). The given name for `clusterNetwork` should match the value for `name` key in the config file (e.g. `"name": "test"` in "test.conf" when `"clusterNetwork": "test"`)
1. Directory for CNI json config file. Multus will find alphabetically first file for the network
1. If Multus fails to find the network, it raises an error message
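As a sketch of the name-matching rule in step 2 above (the file path, plugin type and IPAM values are illustrative placeholders):
```
# Illustrative only: with "clusterNetwork": "test" in the Multus config,
# the file /etc/cni/multus/net.d/test.conf should carry a matching "name".
$ cat /etc/cni/multus/net.d/test.conf
{
  "cniVersion": "0.3.1",
  "name": "test",
  "type": "bridge",
  "bridge": "br0",
  "ipam": { "type": "host-local", "subnet": "10.10.0.0/16" }
}
```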
@@ -75,7 +77,7 @@ In this manner, you may prevent pods from crash looping, and instead wait for th
Only one option is necessary to configure this functionality:
* `readinessindicatorfile`: The path to a file whose existance denotes that the default network is ready.
* `readinessindicatorfile`: The path to a file whose existence denotes that the default network is ready.
*NOTE*: If `readinessindicatorfile` is unset, or is an empty string, this functionality will be disabled, and is disabled by default.
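For example (the path below is only a placeholder; point it at whatever file your default network writes once it is ready):
```
"readinessindicatorfile": "/run/flannel/subnet.env",
```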
@@ -84,7 +86,15 @@ Only one option is necessary to configure this functionality:
You may wish to enable some enhanced logging for Multus, especially during the process where you're configuring Multus and need to understand what is or isn't working with your particular configuration.
Multus will always log via `STDERR`, which is the standard method by which CNI plugins communicate errors, and these errors are logged by the Kubelet. This method is always enabled.
#### Logging via STDERR
By default, Multus will log via `STDERR`, which is the standard method by which CNI plugins communicate errors, and these errors are logged by the Kubelet.
Optionally, you may disable this method by setting the `logToStderr` option in your CNI configuration:
```
"logToStderr": false,
```
#### Writing to a Log File
@@ -117,6 +127,10 @@ You may configure the logging level by using the `LogLevel` option in your CNI c
The functionality provided by the `namespaceIsolation` configuration option enables a mode where Multus only allows pods to access custom resources (the `NetworkAttachmentDefinitions`) within the namespace where that pod resides. In other words, the `NetworkAttachmentDefinitions` are isolated to usage within the namespace in which they're created.
**NOTE**: The default namespace is special in this scenario. Even with namespace isolation enabled, any pod, in any namespace, is allowed to refer to `NetworkAttachmentDefinitions` in the default namespace. This allows you to create commonly used unprivileged `NetworkAttachmentDefinitions` without having to put them in all namespaces. For example, if you had a `NetworkAttachmentDefinition` named `foo` in the default namespace, you may reference it in an annotation with: `default/foo`.
**NOTE**: You can also add additional namespaces which can be referred to globally using the `global-namespaces` option (see next section).
For example, if a pod is created in the namespace called `development`, Multus will not allow networks to be attached when defined by custom resources created in a different namespace, say in the `default` namespace.
Consider the situation where you have a system that has users of different privilege levels -- as an example, a platform which has two administrators: a Senior Administrator and a Junior Administrator. The Senior Administrator may have access to all namespaces, and some network configurations as used by Multus are considered to be privileged in that they allow access to some protected resources available on the network. However, the Junior Administrator has access to only a subset of namespaces, and therefore it should be assumed that the Junior Administrator cannot create pods in their limited subset of namespaces. The `namespaceIsolation` feature provides for this isolation, allowing pods created in given namespaces to only access custom resources in the same namespace as the pod.
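To turn the feature on, set the boolean option in the Multus CNI configuration, for example:
```
"namespaceIsolation": true,
```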
@@ -213,7 +227,7 @@ pod/samplepod created
You'll note that pod fails to spawn successfully. If you check the Multus logs, you'll see an entry such as:
```
2018-12-18T21:41:32Z [error] GetPodNetwork: namespace isolation violation: podnamespace: development / target namespace: privileged
2018-12-18T21:41:32Z [error] GetNetworkDelegates: namespace isolation enabled, annotation violates permission, pod is in namespace development but refers to target namespace privileged
```
This error expresses that the pod resides in the namespace named `development` but refers to a `NetworkAttachmentDefinition` outside of that namespace, in this case, the namespace named `privileged`.
@@ -251,6 +265,16 @@ NAME READY STATUS RESTARTS AGE
samplepod 1/1 Running 0 31s
```
### Allow specific namespaces to be used across namespaces when using namespace isolation
The `globalNamespaces` configuration option is only used when `namespaceIsolation` is set to true. `globalNamespaces` specifies a comma-delimited list of namespaces which can be referred to from outside of any given namespace in which a pod resides.
```
"globalNamespaces": "default,namespace-a,namespace-b",
```
Note that when using `globalNamespaces`, the `default` namespace must be specified in the list if you wish to use that namespace across namespaces; when `globalNamespaces` is not set, the `default` namespace is implied to be usable across namespaces.
### Specify default cluster network in Pod annotations
Users may also specify the default network for any given pod (via annotation), for cases where there are multiple cluster networks available within a Kubernetes cluster.

52
docs/development.md Normal file
View File

@@ -0,0 +1,52 @@
## Development Information
## How to utilize multus-cni code as a library?
Multus now uses [gopkg.in](http://gopkg.in/) to expose its code as a library.
You can use the following command to import our code into your Go code.
```
go get gopkg.in/k8snetworkplumbingwg/multus-cni.v3
```
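As a minimal sketch of consuming the library (assuming only that `pkg/multus` exports `PrintVersionString()`, as shown in `cmd/main.go` earlier in this comparison):
```
package main

import (
	"fmt"

	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/multus"
)

func main() {
	// PrintVersionString is exported by pkg/multus (see cmd/main.go);
	// printing it here just confirms the module import resolves.
	fmt.Println(multus.PrintVersionString())
}
```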
## How do I submit an issue?
Use GitHub as normal; you'll be presented with an option to submit an issue or enhancement request.
Issues are considered stale after 90 days, after which the maintainers reserve the right to close them.
Typically, we'll tag the submitter and ask for more information if necessary before closing.
If an issue is closed that you don't feel is sufficiently resolved, please feel free to re-open the issue and provide any necessary information.
## How do I build multus-cni?
You can use the built-in `./hack/build-go.sh` script!
```
git clone https://github.com/k8snetworkplumbingwg/multus-cni.git
cd multus-cni
./hack/build-go.sh
```
## How do I run CI tests?
Multus has Go unit tests (based on the ginkgo framework). The following commands drive CI tests manually in your environment:
```
sudo ./scripts/test.sh
```
## What are the best practices for logging?
The following are the best practices for multus logging:
* Add `logging.Debugf()` at the beginning of functions
* In case of error handling, use `logging.Errorf()` with the given error info
* `logging.Panicf()` should only be used for critical errors (it should NOT normally be used)
## Multus release schedule
At the first maintainers' meeting held after January 1st and July 1st each year (twice yearly), if a new version has not been tagged, a new version will be tagged.

View File

@@ -13,7 +13,7 @@ Generally we recommend two options: Manually place a Multus binary in your `/opt
*Copy Multus Binary into place*
You may acquire the Multus binary via compilation (see the [developer guide](development.md)) or download the a binary from the [GitHub releases](https://github.com/intel/multus-cni/releases) page. Copy multus binary into CNI binary directory, usually `/opt/cni/bin`. Perform this on all nodes in your cluster (master and nodes).
You may acquire the Multus binary via compilation (see the [developer guide](development.md)) or download a binary from the [GitHub releases](https://github.com/k8snetworkplumbingwg/multus-cni/releases) page. Copy the multus binary into the CNI binary directory, usually `/opt/cni/bin`. Perform this on all nodes in your cluster (master and nodes).
$ cp multus /opt/cni/bin
@@ -35,7 +35,7 @@ Execute following commands at all Kubernetes nodes (i.e. master and minions)
```
$ mkdir -p /etc/cni/net.d
$ cat >/etc/cni/net.d/30-multus.conf <<EOF
$ cat >/etc/cni/net.d/00-multus.conf <<EOF
{
"name": "multus-cni-network",
"type": "multus",
@@ -264,7 +264,7 @@ $ cat <<EOF > /etc/cni/multus/net.d/macvlan2.conf
### Run pod with network annotation
#### Lauch pod with text annotation
#### Launch pod with text annotation
```
# Execute following command at Kubernetes master
@@ -284,7 +284,7 @@ spec:
EOF
```
#### Lauch pod with text annotation for NetworkAttachmentDefinition in different namespace
#### Launch pod with text annotation for NetworkAttachmentDefinition in different namespace
You can also specify NetworkAttachmentDefinition with its namespace as adding `<namespace>/`
@@ -330,7 +330,7 @@ spec:
EOF
```
#### Lauch pod with text annotation with interface name
#### Launch pod with text annotation with interface name
You can also specify interface name as adding `@<ifname>`.
@@ -352,7 +352,7 @@ spec:
EOF
```
#### Lauch pod with json annotation
#### Launch pod with json annotation
```
# Execute following command at Kubernetes master
@@ -375,7 +375,7 @@ spec:
EOF
```
#### Lauch pod with json annotation for NetworkAttachmentDefinition in different namespace
#### Launch pod with json annotation for NetworkAttachmentDefinition in different namespace
You can also specify NetworkAttachmentDefinition with its namespace as adding `"namespace": "<namespace>"`.
@@ -400,7 +400,7 @@ spec:
EOF
```
#### Lauch pod with json annotation with interface
#### Launch pod with json annotation with interface
You can also specify interface name as adding `"interface": "<ifname>"`.
@@ -469,6 +469,72 @@ $ kubectl exec -it pod-case-06 -- ip -d address
| macvlan1 | macvlan interface (macvlan-conf-1) |
| net2 | macvlan interface (macvlan-conf-2) |
## Specifying a default route for a specific attachment
Typically, the default route for a pod will route traffic over `eth0`, and therefore over the cluster-wide default network. You may wish to specify that a different network attachment will have the default route.
You can achieve this by using the JSON formatted annotation and specifying a `default-route` key.
*NOTE*: It's important that you consider that this may impact some functionality of getting traffic to route over the cluster-wide default network.
For example, we have a this configuration for macvlan:
```
cat <<EOF | kubectl create -f -
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: macvlan-conf
spec:
config: '{
"cniVersion": "0.3.0",
"type": "macvlan",
"master": "eth0",
"mode": "bridge",
"ipam": {
"type": "host-local",
"subnet": "192.168.2.0/24",
"rangeStart": "192.168.2.200",
"rangeEnd": "192.168.2.216",
"routes": [
{ "dst": "0.0.0.0/0" }
],
"gateway": "192.168.2.1"
}
}'
EOF
```
We can then create a pod which uses the `default-route` key in the JSON formatted `k8s.v1.cni.cncf.io/networks` annotation.
```
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
name: samplepod
annotations:
k8s.v1.cni.cncf.io/networks: '[{
"name": "macvlan-conf",
"default-route": ["192.168.2.1"]
}]'
spec:
containers:
- name: samplepod
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: dougbtv/centos-network
EOF
```
This will set `192.168.2.1` as the default route over the `net1` interface, such as:
```
$ kubectl exec -it samplepod -- ip route
default via 192.168.2.1 dev net1
10.244.0.0/24 dev eth0 proto kernel scope link src 10.244.0.169
10.244.0.0/16 via 10.244.0.1 dev eth0
```
## Entrypoint Parameters
Multus CNI, when installed using the daemonset-style installation, uses an entrypoint script which copies the Multus binary into place and places CNI configurations. This entrypoint takes a variety of parameters for customization.
@@ -501,7 +567,11 @@ This the directory in which the Multus binary will be installed.
--namespace-isolation=false
Setting this option to true enables the Namespace isolation feature, which insists that custom resources must be created in the same namespace as the pods, otherwise it will refuse to attach those definitions as additional interfaces.
Setting this option to true enables the Namespace isolation feature, which insists that custom resources must be created in the same namespace as the pods, otherwise it will refuse to attach those definitions as additional interfaces. See [the configuration guide for more information](configuration.md).
--global-namespaces=default,foo,bar
The `--global-namespaces` option works only when `--namespace-isolation=true`. This takes a comma-separated list of namespaces which can be referred to globally when namespace isolation is enabled. See [the configuration guide for more information](configuration.md).
--multus-bin-file=/usr/src/multus-cni/bin/multus
@@ -525,16 +595,20 @@ This can be used if you have your CNI configuration stored in an alternate locat
Used only with `--multus-conf-file=auto`. Allows you to specify an alternate path to the Kubeconfig.
--multus-master-cni-file-name=
The `--multus-master-cni-file-name` can be used to select the cni file as the master cni, rather than the first file in cni-conf-dir. For example, `--multus-master-cni-file-name=10-calico.conflist`.
--multus-log-level=
--multus-log-file=
Used only with `--multus-conf-file=auto`. See the documentation for logging for which values are permitted.
Used only with `--multus-conf-file=auto`. Allows you to specify CNI spec version. Please set if you need to speicfy CNI spec version.
Used only with `--multus-conf-file=auto`. Allows you to specify CNI spec version. Please set if you need to specify CNI spec version.
--cni-version=
In some cases, the original CNI configuration that the Multus configuration was generated from (using `--multus-conf-file=auto`) may be used as a sort of semaphor for network readiness -- as this model is used by the Kubelet itself. If you need to disable Multus' availablity, you may wish to clean out the generated configuration file when the source file for autogeneration of the config file is no longer present. You can use this functionality by setting:
In some cases, the original CNI configuration that the Multus configuration was generated from (using `--multus-conf-file=auto`) may be used as a sort of semaphore for network readiness -- as this model is used by the Kubelet itself. If you need to disable Multus' availability, you may wish to clean out the generated configuration file when the source file for autogeneration of the config file is no longer present. You can use this functionality by setting:
--cleanup-config-on-exit=true
@@ -549,3 +623,11 @@ Additionally when using CRIO, you may wish to have the CNI config file that's us
When using `--multus-conf-file=auto` you may also care to specify a `binDir` in the configuration; this can be accomplished using the `--additional-bin-dir` option.
--additional-bin-dir=/opt/multus/bin
Sometimes, you may wish to not have the entrypoint copy the binary file onto the host. Potentially, you have another way to copy in a specific version of Multus, for example. By default, it's always copied, but you may disable the copy with:
--skip-multus-binary-copy=true
If you wish to have auto configuration use the `readinessindicatorfile` in the configuration, you can use the `--readiness-indicator-file` to express which file should be used as the readiness indicator.
--readiness-indicator-file=/path/to/file
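As a sketch, these flags are typically passed to the entrypoint via the container spec in the Multus daemonset; the specific values below are illustrative, not taken from this repository's manifests:
```
# excerpt from a daemonset container spec (illustrative values)
command: ["/entrypoint.sh"]
args:
  - "--multus-conf-file=auto"
  - "--namespace-isolation=true"
  - "--global-namespaces=default,foo"
  - "--cleanup-config-on-exit=true"
  - "--additional-bin-dir=/opt/multus/bin"
```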

(Five binary image files appear in this comparison; the before/after sizes are unchanged: 38 KiB, 190 KiB, 49 KiB, 197 KiB, 34 KiB.)
View File

@@ -1,6 +1,6 @@
# Quickstart Guide
This guide is intended as a way to get you off the ground, and using Multus CNI to create Kubernetes pods with multiple interfaces. If you're already using Multus and need more detail, see the [comprehensive usage guide](how-to-use.md). This document a quickstart and a getting started guide in one, intended for your first run-through of Multus CNI.
This guide is intended as a way to get you off the ground, using Multus CNI to create Kubernetes pods with multiple interfaces. If you're already using Multus and need more detail, see the [comprehensive usage guide](how-to-use.md). This document is a quickstart and a getting started guide in one, intended for your first run-through of Multus CNI.
We'll first install Multus CNI, and then we'll setup some configurations so that you can see how multiple interfaces are created for pods.
@@ -15,11 +15,13 @@ Two things we'll refer to a number of times through this document are:
Our installation method requires that you first have installed Kubernetes and have configured a default network -- that is, a CNI plugin that's used for your pod-to-pod connectivity.
We recommend Kubernetes 1.16 or later.
To install Kubernetes, you may decide to use [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/), or potentially [kubespray](https://github.com/kubernetes-sigs/kubespray).
After installing Kubernetes, you must install a default network CNI plugin. If you're using kubeadm, refer to the "[Installing a pod network add-on](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#pod-network)" section in the kubeadm documentation. If it's your first time, we generally recommend using Flannel for the sake of simplicity.
Alternatively, for advanced use cases, for installing Multus and a default network plugin at the same time, you may refer to the [Kubernetes Network Plumbing Group's Reference Deployments](https://github.com/K8sNetworkPlumbingWG/reference-deployment).
Alternatively, for advanced use cases, for installing Multus and a default network plugin at the same time, you may refer to the [Kubernetes Network Plumbing Group's Reference Deployments](https://github.com/k8snetworkplumbingwg/reference-deployment).
To verify that your default network is ready, you may list your Kubernetes nodes with:
@@ -27,7 +29,14 @@ To verify that you default network is ready, you may list your Kubernetes nodes
kubectl get nodes
```
In the case that your default network is ready
In the case that your default network is ready you will see the `STATUS` column also switch to `Ready` for each node.
```
NAME STATUS ROLES AGE VERSION
master-0 Ready master 1h v1.17.1
master-1 Ready master 1h v1.17.1
master-2 Ready master 1h v1.17.1
```
## Installation
@@ -36,26 +45,19 @@ Our recommended quickstart method to deploy Multus is to deploy using a Daemonse
Firstly, clone this GitHub repository.
```
git clone https://github.com/intel/multus-cni.git && cd multus-cni
git clone https://github.com/k8snetworkplumbingwg/multus-cni.git && cd multus-cni
```
If you're using Kubernetes 1.16+, we'll apply a YAML file with `kubectl` from this repo.
We'll apply a YAML file with `kubectl` from this repo.
```
$ cat ./images/multus-daemonset.yml | kubectl apply -f -
```
Or, for Kubernetes versions < 1.16, use the prior version yaml:
```
$ cat ./images/multus-daemonset-pre-1.16.yml | kubectl apply -f -
```
### What the Multus daemonset does
* Starts a Multus daemonset, this runs a pod on each node which places a Multus binary on each node in `/opt/cni/bin`
* Reads the lexigraphically (alphabetically) first configuration file in `/etc/cni/net.d`, and creates a new configuration file for Multus as `/etc/cni/net.d/00-multus.conf`, this configuration is auto-generated and is based on the default network configuration (which is assumed to be the alphabetically first configuration)
* Reads the lexicographically (alphabetically) first configuration file in `/etc/cni/net.d`, and creates a new configuration file for Multus as `/etc/cni/net.d/00-multus.conf`, this configuration is auto-generated and is based on the default network configuration (which is assumed to be the alphabetically first configuration)
* Creates a `/etc/cni/net.d/multus.d` directory on each node with authentication information for Multus to access the Kubernetes API.
@@ -67,7 +69,7 @@ Generally, the first step in validating your installation is to ensure that the
$ kubectl get pods --all-namespaces | grep -i multus
```
You may further validate that it has ran by looking at the `/etc/cni/net.d/` directory and ensure that the alphabetically first
You may further validate that it has run by looking at the `/etc/cni/net.d/` directory and ensuring that the auto-generated `/etc/cni/net.d/00-multus.conf` exists, corresponding to the alphabetically first configuration file.
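For instance, you might list the directory directly; the flannel entry below is only a placeholder for whatever your default network's configuration file is:
```
$ ls /etc/cni/net.d/
00-multus.conf  10-flannel.conflist  multus.d
```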
## Creating additional interfaces
@@ -91,11 +93,11 @@ CNI configurations are JSON, and we have a structure here that has a few things
2. `type`: This tells CNI which binary to call on disk. Each CNI plugin is a binary that's called. Typically, these binaries are stored in `/opt/cni/bin` on each node, and CNI executes this binary. In this case we've specified the `loopback` binary (which creates a loopback-type network interface). If this is your first time installing Multus, you might want to verify that the plugins that are in the "type" field are actually on disk in the `/opt/cni/bin` directory.
3. `additional`: This field is put here as an example, each CNI plugin can specify whatever configuration parameters they'd like in JSON. These are specific to the binary you're calling in the `type` field.
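Putting those pieces together, a minimal sketch of such a configuration might look like the following (the `additional` value is just a stand-in for plugin-specific parameters):
```
{
  "cniVersion": "0.3.1",
  "type": "loopback",
  "additional": "information"
}
```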
For an even further example -- take a look at the [bridge CNI plugin README](https://github.com/containernetworking/plugins/tree/master/plugins/main/bridge) which shows additional
For an even further example -- take a look at the [bridge CNI plugin README](https://github.com/containernetworking/plugins/tree/master/plugins/main/bridge) which shows additional details.
If you'd like more information about CNI configuration, you can read [the entire CNI specification](https://github.com/containernetworking/cni/blob/master/SPEC.md). It might also be useful to look at the [CNI reference plugins](https://github.com/containernetworking/plugins) and see how they're configured.
You do not need to reload or refresh the Kubelets when CNI configurations change. These are read on each creation & deletion of pods. So if you change a configuration, it'll apply the next time a pod is created. Existing pods may need to be restarted if they need the new configuration.
You do not need to reload or refresh the Kubelets when CNI configurations change. These are read on each creation & deletion of pods. So if you change a configuration, it'll apply the next time a pod is created. Existing pods may need to be restarted if they need the new configuration.
### Storing a configuration as a Custom Resource
@@ -151,7 +153,7 @@ kubectl describe network-attachment-definitions macvlan-conf
### Creating a pod that attaches an additional interface
We're going to create a pod. This will look familiar as any pod you might have created before, but, we'll have a special `annotations` field -- in this case we'll have an annotation called `k8s.v1.cni.cncf.io/networks`. This field takes a comma delimited list of the names of your `NetworkAttachmentDefinition`s as we created above. Note in the comand below that we have the annotation of `k8s.v1.cni.cncf.io/networks: macvlan-conf` where `macvlan-conf` is the name we used above when we created our configuration.
We're going to create a pod. This will look familiar as any pod you might have created before, but, we'll have a special `annotations` field -- in this case we'll have an annotation called `k8s.v1.cni.cncf.io/networks`. This field takes a comma delimited list of the names of your `NetworkAttachmentDefinition`s as we created above. Note in the command below that we have the annotation of `k8s.v1.cni.cncf.io/networks: macvlan-conf` where `macvlan-conf` is the name we used above when we created our configuration.
Let's go ahead and create a pod (that just sleeps for a really long time) with this command:
@@ -166,18 +168,18 @@ metadata:
spec:
containers:
- name: samplepod
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: dougbtv/centos-network
command: ["/bin/ash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: alpine
EOF
```
You may now inspect the pod and see what interfaces are attached, like so:
```
$ kubectl exec -it samplepod -- ip a
```
You should note that there are 3 interfaces:
* `lo` a loopback interface
* `eth0` our default network
@@ -189,7 +191,7 @@ For additional confirmation, use `kubectl describe pod samplepod` and there will
```
Annotations: k8s.v1.cni.cncf.io/networks: macvlan-conf
k8s.v1.cni.cncf.io/network-status:
[{
"name": "cbr0",
"ips": [
@@ -225,8 +227,8 @@ metadata:
spec:
containers:
- name: samplepod
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: dougbtv/centos-network
command: ["/bin/ash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: alpine
EOF
```
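If you'd rather check those annotations from the command line than scan `kubectl describe` output, something along these lines works (a sketch; it assumes `jq` is available, as in the e2e scripts below):
```
# Print the per-network status annotation that Multus writes onto the pod.
$ kubectl get pod samplepod \
    -o jsonpath='{.metadata.annotations.k8s\.v1\.cni\.cncf\.io/network-status}' | jq .
```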

18
e2e/README.md Normal file
View File

@@ -0,0 +1,18 @@
## Multus e2e test with kind
### How to test e2e
```
$ git clone https://github.com/k8snetworkplumbingwg/multus-cni.git
$ cd multus-cni/e2e
$ ./get_tools.sh
$ ./setup_cluster.sh
$ ./test-simple-macvlan1.sh
```
### How to teardown cluster
```
$ ./teardown.sh
```
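The other test scripts added in this change follow the same pattern and can be run the same way against a cluster created by `setup_cluster.sh`, for example:
```
# Each script creates its resources, verifies them, and cleans up after itself.
$ ./test-simple-pod.sh
$ ./test-default-route1.sh
```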

64
e2e/cni-install.yml Normal file
View File

@@ -0,0 +1,64 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: cni-install-sh
namespace: kube-system
data:
install_cni.sh: |
cd /tmp
wget https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz
cd /host/opt/cni/bin
tar xvfzp /tmp/cni-plugins-linux-amd64-v0.8.5.tgz
sleep infinite
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: install-cni-plugins
namespace: kube-system
labels:
name: cni-plugins
spec:
selector:
matchLabels:
name: cni-plugins
template:
metadata:
labels:
name: cni-plugins
spec:
hostNetwork: true
nodeSelector:
kubernetes.io/arch: amd64
tolerations:
- operator: Exists
effect: NoSchedule
containers:
- name: install-cni-plugins
image: alpine
command: ["/bin/sh", "/scripts/install_cni.sh"]
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: true
volumeMounts:
- name: cni-bin
mountPath: /host/opt/cni/bin
- name: scripts
mountPath: /scripts
volumes:
- name: cni-bin
hostPath:
path: /opt/cni/bin
- name: scripts
configMap:
name: cni-install-sh
items:
- key: install_cni.sh
path: install_cni.sh

57
e2e/default-route1.yml Normal file
View File

@@ -0,0 +1,57 @@
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: default-route-config
spec:
config: '{
"cniVersion": "0.3.1",
"plugins": [
{
"type": "macvlan",
"master": "eth1",
"mode": "bridge",
"ipam": {
"type": "static"
}
} ]
}'
---
apiVersion: v1
kind: Pod
metadata:
name: default-route-worker1
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "default-route-config",
"ips": [ "10.1.1.21/24" ] ,
"default-route": [ "10.1.1.254" ] }
]'
labels:
app: default-route1
spec:
containers:
- name: default-route-worker1
image: centos:8
command: ["/bin/sleep", "10000"]
securityContext:
privileged: true
---
apiVersion: v1
kind: Pod
metadata:
name: default-route-worker2
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "default-route-config",
"ips": [ "10.1.1.22/24" ] }
]'
labels:
app: default-route1
spec:
containers:
- name: default-route-worker2
image: centos:8
command: ["/bin/sleep", "10000"]
securityContext:
privileged: true

15
e2e/get_tools.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/sh
set -o errexit
if [ ! -d bin ]; then
mkdir bin
fi
curl -Lo ./bin/kind "https://github.com/kubernetes-sigs/kind/releases/download/v0.8.1/kind-$(uname)-amd64"
chmod +x ./bin/kind
curl -Lo ./bin/kubectl https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
chmod +x ./bin/kubectl
curl -Lo ./bin/koko https://github.com/redhat-nfvpe/koko/releases/download/v0.82/koko_0.82_linux_amd64
chmod +x ./bin/koko
curl -Lo ./bin/jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64
chmod +x ./bin/jq

247
e2e/multus-daemonset.yml Normal file
View File

@@ -0,0 +1,247 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
group: k8s.cni.cncf.io
scope: Namespaced
names:
plural: network-attachment-definitions
singular: network-attachment-definition
kind: NetworkAttachmentDefinition
shortNames:
- net-attach-def
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
config:
type: string
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: multus
rules:
- apiGroups: ["k8s.cni.cncf.io"]
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
- pods/status
verbs:
- get
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: multus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: multus
subjects:
- kind: ServiceAccount
name: multus
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: multus
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: multus-cni-config
namespace: kube-system
labels:
tier: node
app: multus
data:
# NOTE: If you'd prefer to manually apply a configuration file, you may create one here.
# In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod
# change the "args" line below from
# - "--multus-conf-file=auto"
# to:
# "--multus-conf-file=/tmp/multus-conf/70-multus.conf"
# Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the
# /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet.
cni-conf.json: |
{
"name": "multus-cni-network",
"type": "multus",
"capabilities": {
"portMappings": true
},
"delegates": [
{
"cniVersion": "0.3.1",
"name": "default-cni-network",
"plugins": [
{
"type": "flannel",
"name": "flannel.1",
"delegate": {
"isDefaultGateway": true,
"hairpinMode": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
],
"kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-multus-ds-amd64
namespace: kube-system
labels:
tier: node
app: multus
name: multus
spec:
selector:
matchLabels:
name: multus
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
tier: node
app: multus
name: multus
spec:
hostNetwork: true
nodeSelector:
kubernetes.io/arch: amd64
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: multus
containers:
- name: kube-multus
image: localhost:5000/multus:e2e
command: ["/entrypoint.sh"]
args:
- "--multus-conf-file=auto"
- "--cni-version=0.3.1"
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: true
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: cnibin
mountPath: /host/opt/cni/bin
- name: multus-cfg
mountPath: /tmp/multus-conf
volumes:
- name: cni
hostPath:
path: /etc/cni/net.d
- name: cnibin
hostPath:
path: /opt/cni/bin
- name: multus-cfg
configMap:
name: multus-cni-config
items:
- key: cni-conf.json
path: 70-multus.conf
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-multus-ds-ppc64le
namespace: kube-system
labels:
tier: node
app: multus
name: multus
spec:
selector:
matchLabels:
name: multus
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
tier: node
app: multus
name: multus
spec:
hostNetwork: true
nodeSelector:
kubernetes.io/arch: ppc64le
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: multus
containers:
- name: kube-multus
# ppc64le support requires multus:latest for now. support 3.3 or later.
image: nfvpe/multus:latest-ppc64le
command: ["/entrypoint.sh"]
args:
- "--multus-conf-file=auto"
- "--cni-version=0.3.1"
resources:
requests:
cpu: "100m"
memory: "90Mi"
limits:
cpu: "100m"
memory: "90Mi"
securityContext:
privileged: true
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: cnibin
mountPath: /host/opt/cni/bin
- name: multus-cfg
mountPath: /tmp/multus-conf
volumes:
- name: cni
hostPath:
path: /etc/cni/net.d
- name: cnibin
hostPath:
path: /opt/cni/bin
- name: multus-cfg
configMap:
name: multus-cni-config
items:
- key: cni-conf.json
path: 70-multus.conf

68
e2e/setup_cluster.sh Executable file
View File

@@ -0,0 +1,68 @@
#!/bin/sh
set -o errexit
export PATH=${PATH}:./bin
kind_network='kind'
reg_name='kind-registry'
reg_port='5000'
running="$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)"
if [ "${running}" != 'true' ]; then
# run registry and push the multus image
docker run -d --restart=always -p "${reg_port}:5000" --name "${reg_name}" registry:2
docker build -t localhost:5000/multus:e2e -f ../deployments/Dockerfile ..
docker push localhost:5000/multus:e2e
fi
reg_host="${reg_name}"
if [ "${kind_network}" = "bridge" ]; then
reg_host="$(docker inspect -f '{{.NetworkSettings.IPAddress}}' "${reg_name}")"
fi
echo "Registry Host: ${reg_host}"
# deploy cluster with kind
cat <<EOF | kind create cluster --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
endpoint = ["http://${reg_host}:${reg_port}"]
nodes:
- role: control-plane
- role: worker
- role: worker
EOF
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: local-registry-hosting
namespace: kube-public
data:
localRegistryHosting.v1: |
host: "localhost:${reg_port}"
help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF
containers=$(docker network inspect ${kind_network} -f "{{range .Containers}}{{.Name}} {{end}}")
needs_connect="true"
for c in $containers; do
if [ "$c" = "${reg_name}" ]; then
needs_connect="false"
fi
done
if [ "${needs_connect}" = "true" ]; then
docker network connect "${kind_network}" "${reg_name}" || true
fi
kind export kubeconfig
sudo env PATH=${PATH} koko -d kind-worker,eth1 -d kind-worker2,eth1
sleep 1
kubectl -n kube-system wait --for=condition=available deploy/coredns --timeout=300s
kubectl create -f multus-daemonset.yml
sleep 1
kubectl -n kube-system wait --for=condition=ready -l name=multus pod --timeout=300s
kubectl create -f cni-install.yml
sleep 1
kubectl -n kube-system wait --for=condition=ready -l name=cni-plugins pod --timeout=300s

63
e2e/simple-macvlan1.yml Normal file
View File

@@ -0,0 +1,63 @@
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: macvlan1-config
spec:
config: '{
"cniVersion": "0.3.1",
"plugins": [
{
"type": "macvlan",
"capabilities": { "ips": true },
"master": "eth1",
"mode": "bridge",
"ipam": {
"type": "static"
}
}, {
"type": "tuning"
} ]
}'
---
apiVersion: v1
kind: Pod
metadata:
name: macvlan1-worker1
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "macvlan1-config",
"ips": [ "10.1.1.11/24" ] }
]'
labels:
app: macvlan
spec:
containers:
- name: macvlan-worker1
image: centos:8
command: ["/bin/sleep", "10000"]
securityContext:
privileged: true
nodeSelector:
kubernetes.io/hostname: kind-worker
---
apiVersion: v1
kind: Pod
metadata:
name: macvlan1-worker2
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "macvlan1-config",
"ips": [ "10.1.1.12/24" ] }
]'
labels:
app: macvlan
spec:
containers:
- name: macvlan-worker2
image: centos:8
command: ["/bin/sleep", "10000"]
securityContext:
privileged: true
nodeSelector:
kubernetes.io/hostname: kind-worker2

15
e2e/simple-pod.yml Normal file
View File

@@ -0,0 +1,15 @@
---
apiVersion: v1
kind: Pod
metadata:
name: simple-centos1
annotations:
labels:
app: simple
spec:
containers:
- name: simple-centos1
image: centos:8
command: ["/bin/sleep", "10000"]
securityContext:
privileged: true

10
e2e/teardown.sh Executable file
View File

@@ -0,0 +1,10 @@
#!/bin/sh
#set -o errexit
reg_name='kind-registry'
export PATH=${PATH}:./bin
# delete cluster kind
kind delete cluster
docker kill ${reg_name}
docker rm ${reg_name}

44
e2e/test-default-route1.sh Executable file
View File

@@ -0,0 +1,44 @@
#!/bin/sh
set -o errexit
export PATH=${PATH}:./bin
kubectl create -f default-route1.yml
kubectl wait --for=condition=ready -l app=default-route1 --timeout=300s pod
echo "check default-route-worker1 interface: net1"
kubectl exec default-route-worker1 -- ip a show dev net1
echo "check default-route-worker1 interface address: net1"
ipaddr=$(kubectl exec default-route-worker1 -- ip -j a show | jq -r \
'.[]|select(.ifname =="net1")|.addr_info[]|select(.family=="inet").local')
if [ $ipaddr != "10.1.1.21" ]; then
echo "default-route-worker1 IP address is different: ${ipaddr}"
fi
echo "check default-route-worker1 default route"
ipaddr=$(kubectl exec default-route-worker1 -- ip -j route | jq -r \
'.[]|select(.dst=="default")|.gateway')
if [ $ipaddr != "10.1.1.254" ]; then
echo "default-route-worker1 default route is different: ${ipaddr}"
fi
echo "check default-route-worker2 interface: net1"
kubectl exec default-route-worker2 -- ip a show dev net1
echo "check default-route-worker2 interface address: net1"
ipaddr=$(kubectl exec default-route-worker2 -- ip -j a show | jq -r \
'.[]|select(.ifname =="net1")|.addr_info[]|select(.family=="inet").local')
if [ $ipaddr != "10.1.1.22" ]; then
echo "default-route-worker2 IP address is different: ${ipaddr}"
fi
echo "check default-route-worker2 default route"
ipaddr=$(kubectl exec default-route-worker2 -- ip -j route | jq -r \
'.[]|select(.dst=="default")|.gateway')
if [ $ipaddr != "10.244.1.1" ]; then
echo "default-route-worker2 default route is different: ${ipaddr}"
fi
echo "cleanup resources"
kubectl delete -f default-route1.yml

30
e2e/test-simple-macvlan1.sh Executable file
View File

@@ -0,0 +1,30 @@
#!/bin/sh
set -o errexit
export PATH=${PATH}:./bin
kubectl create -f simple-macvlan1.yml
kubectl wait --for=condition=ready -l app=macvlan --timeout=300s pod
echo "check macvlan1-worker1 interface: net1"
kubectl exec macvlan1-worker1 -- ip a show dev net1
echo "check macvlan1-worker1 interface address: net1"
ipaddr=$(kubectl exec macvlan1-worker1 -- ip -j a show | jq -r \
'.[]|select(.ifname =="net1")|.addr_info[]|select(.family=="inet").local')
if [ $ipaddr != "10.1.1.11" ]; then
echo "macvlan1-worker1 IP address is different: ${ipaddr}"
fi
echo "check macvlan1-worker2 interface: net1"
kubectl exec macvlan1-worker2 -- ip a show dev net1
echo "check macvlan1-worker2 interface address: net1"
ipaddr=$(kubectl exec macvlan1-worker2 -- ip -j a show | jq -r \
'.[]|select(.ifname =="net1")|.addr_info[]|select(.family=="inet").local')
if [ $ipaddr != "10.1.1.12" ]; then
echo "macvlan1-worker2 IP address is different: ${ipaddr}"
fi
echo "cleanup resources"
kubectl delete -f simple-macvlan1.yml

10
e2e/test-simple-pod.sh Executable file
View File

@@ -0,0 +1,10 @@
#!/bin/sh
set -o errexit
export PATH=${PATH}:./bin
kubectl create -f simple-pod.yml
kubectl wait --for=condition=ready -l app=simple --timeout=300s pod
echo "cleanup resources"
kubectl delete -f simple-pod.yml

View File

@@ -62,9 +62,9 @@ A sample `cni-configuration.conf` is provided, typically this file is placed in
The main thing to consider in this setup is the `macvlan-conf.yml`, which will likely need to be tailored to the configuration of the node on which it is used.
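In particular, the `master` field of the macvlan configuration must name a real interface on the node; a quick way to check is sketched below (the `ens4` interface name is only an illustration):
```
# List the node's interfaces, then make sure the "master" in macvlan-conf.yml matches one of them.
$ ip -o link show | awk -F': ' '{print $2}'
$ grep '"master"' macvlan-conf.yml   # e.g. change "eth0" to "ens4" if that is what the node uses
```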
## Passing down device information
Some CNI plugins require specific device information which may be pre-allocated by a Kubernetes device plugin. This can be indicated by providing the `k8s.v1.cni.cncf.io/resourceName` annotation in the network attachment definition CRD. The file [`examples/sriov-net.yaml`](./sriov-net.yaml) shows an example of how to define a network attachment definition with specific device allocation information. Multus will retrieve the allocated device information and make it available for the CNI plugin to work on.
In this example (shown below), it is expected that an [SRIOV Device Plugin](https://github.com/intel/sriov-network-device-plugin/) is making a pool of SRIOV VFs available to Kubernetes with `intel.com/sriov` as their resourceName. Any device allocated from this resource pool will be passed down by Multus to the [sriov-cni](https://github.com/intel/sriov-cni/tree/dev/k8s-deviceid-model) plugin in the `deviceID` field. It is up to the sriov-cni plugin to capture this information and work with this specific device.
```yaml
apiVersion: "k8s.cni.cncf.io/v1"
@@ -89,6 +89,6 @@ spec:
}
}'
```
The [sriov-pod.yml](./sriov-pod.yml) is an example Pod manifest file that requests an SRIOV device from a host, which is then configured using the above network attachment definition.
>For further information on how to configure SRIOV Device Plugin and SRIOV-CNI please refer to the links given above.

View File

@@ -1,19 +0,0 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: multus-crd-overpowered
rules:
- apiGroups: ["k8s.cni.cncf.io"]
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
- pods/status
verbs:
- get
- update

View File

@@ -1,13 +0,0 @@
{
"name": "multus-cni-network",
"type": "multus",
"delegates": [
{
"type": "flannel",
"delegate": {
"isDefaultGateway": true
}
}
],
"kubeconfig": "/etc/kubernetes/kubelet.conf"
}

View File

@@ -1,21 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
group: k8s.cni.cncf.io
version: v1
scope: Namespaced
names:
plural: network-attachment-definitions
singular: network-attachment-definition
kind: NetworkAttachmentDefinition
shortNames:
- net-attach-def
validation:
openAPIV3Schema:
properties:
spec:
properties:
config:
type: string

View File

@@ -1,12 +0,0 @@
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: flannel-conf
spec:
config: '{
"cniVersion": "0.3.0",
"type": "flannel",
"delegate": {
"isDefaultGateway": true
}
}'

View File

@@ -1,21 +0,0 @@
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: macvlan-conf
spec:
config: '{
"cniVersion": "0.3.0",
"type": "macvlan",
"master": "eth0",
"mode": "bridge",
"ipam": {
"type": "host-local",
"subnet": "192.168.1.0/24",
"rangeStart": "192.168.1.200",
"rangeEnd": "192.168.1.216",
"routes": [
{ "dst": "0.0.0.0/0" }
],
"gateway": "192.168.1.1"
}
}'

56
examples/macvlan-pod.yml Normal file
View File

@@ -0,0 +1,56 @@
---
# This net-attach-def defines macvlan-conf with
# + ips capabilities to specify ip in pod annotation and
# + mac capabilities to specify mac address in pod annotation
# default gateway is defined as well
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: macvlan-conf
spec:
config: '{
"cniVersion": "0.3.1",
"plugins": [
{
"type": "macvlan",
"capabilities": { "ips": true },
"master": "eth0",
"mode": "bridge",
"ipam": {
"type": "static",
"routes": [
{
"dst": "0.0.0.0/0",
"gw": "10.1.1.1"
}
]
}
}, {
"capabilities": { "mac": true },
"type": "tuning"
}
]
}'
---
# Define a pod with macvlan-conf, defined above, with ip address and mac, and
# "gateway" overrides default gateway to use macvlan-conf's one.
# without "gateway" in k8s.v1.cni.cncf.io/networks, default route will be cluster
# network interface, eth0, even though macvlan-conf has default gateway config.
apiVersion: v1
kind: Pod
metadata:
name: samplepod
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "macvlan-conf",
"ips": [ "10.1.1.101/24" ],
"mac": "c2:b0:57:49:47:f1",
"gateway": [ "10.1.1.1" ]
}]'
spec:
containers:
- name: samplepod
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: dougbtv/centos-network
ports:
- containerPort: 80

View File

@@ -1,36 +0,0 @@
{
"name": "multus-cni-network",
"type": "multus"
"capabilities": {
"portMappings": true
},
"delegates": [
{
"cniVersion": "0.3.1",
"name": "ptp-tuning-conflist",
"plugins": [
{
"dns": {
"nameservers": [
"172.16.1.1"
]
},
"ipMasq": true,
"ipam": {
"subnet": "172.16.0.0/24",
"type": "host-local"
},
"mtu": 512,
"type": "ptp"
},
{
"capabilities": {
"portMappings": true
},
"externalSetMarkChain": "KUBE-MARK-MASQ",
"type": "portmap"
}
]
}
],
}

View File

@@ -1,163 +0,0 @@
# -----------------------------------------------
# - Example Configuration Deployment
# -----------------------------------------------
# - Deploys a .conf file on each node
# - Configured for Multus + Flannel.
# - As well as assets for Flannel
# - Based on https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml
# -----------------------------------------------
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-multus-cfg
namespace: kube-system
labels:
tier: node
app: multus
data:
cni-conf.json: |
{
"name": "multus-cni-network",
"type": "multus",
"delegates": [
{
"type": "flannel",
"delegate": {
"isDefaultGateway": true
}
}
],
"kubeconfig": "/etc/kubernetes/kubelet.conf"
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-multus-ds
namespace: kube-system
labels:
tier: node
app: multus
spec:
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
tier: node
app: multus
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.10.0-amd64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-multus-with-flannel.conf
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: multus-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.10.0-amd64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run
- name: multus-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: multus-cfg
configMap:
name: kube-multus-cfg

View File

@@ -1,21 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: testpod1
labels:
env: test
annotations:
k8s.v1.cni.cncf.io/networks: sriov-net-a
spec:
containers:
- name: appcntr1
image: centos/tools
imagePullPolicy: IfNotPresent
command: [ "/bin/bash", "-c", "--" ]
args: [ "while true; do sleep 300000; done;" ]
resources:
requests:
intel.com/sriov: '1'
limits:
intel.com/sriov: '1'
restartPolicy: "Never"

View File

@@ -1,30 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
# name must match the spec fields below, and be in the form: <plural>.<group>
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
# group name to use for REST API: /apis/<group>/<version>
group: k8s.cni.cncf.io
# version name to use for REST API: /apis/<group>/<version>
version: v1
# either Namespaced or Cluster
scope: Namespaced
names:
# plural name to be used in the URL: /apis/<group>/<version>/<plural>
plural: network-attachment-definitions
# singular name to be used as an alias on the CLI and for display
singular: network-attachment-definition
# kind is normally the CamelCased singular type. Your resource manifests use this.
kind: NetworkAttachmentDefinition
# shortNames allow shorter string to match your resource on the CLI
shortNames:
- net-attach-def
validation:
openAPIV3Schema:
properties:
spec:
properties:
config:
type: string

View File

@@ -1,16 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: multus
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
- nonResourceURLs:
- '*'
verbs:
- '*'

View File

@@ -1,5 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: testns1

View File

@@ -1,72 +0,0 @@
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: macvlan-conf-1
spec:
config: '{
"cniVersion": "0.3.0",
"type": "macvlan",
"master": "eth1",
"mode": "bridge",
"ipam": {
"type": "static",
"addresses": [
{ "address": "10.1.1.101/24" }
]
}
}'
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: macvlan-conf-2
spec:
config: '{
"cniVersion": "0.3.0",
"type": "macvlan",
"master": "eth1",
"mode": "bridge",
"ipam": {
"type": "static",
"addresses": [
{ "address": "10.1.1.102/24" }
]
}
}'
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: macvlan-conf-3
spec:
config: '{
"cniVersion": "0.3.0",
"type": "macvlan",
"master": "eth1",
"mode": "bridge",
"ipam": {
"type": "static",
"addresses": [
{ "address": "10.1.1.103/24" }
]
}
}'
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: macvlan-conf-4
spec:
config: '{
"cniVersion": "0.3.0",
"type": "macvlan",
"master": "eth1",
"mode": "bridge",
"ipam": {
"type": "static",
"addresses": [
{ "address": "10.1.1.104/24" }
]
}
}'

View File

@@ -1,19 +0,0 @@
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: vlan-conf-1-1
namespace: testns1
spec:
config: '{
"cniVersion": "0.3.0",
"type": "vlan",
"master": "eth1",
"vlanid": 1,
"ipam": {
"type": "static",
"addresses": [
{ "address": "172.16.1.101/24"
} ]
}
}'

View File

@@ -1,196 +0,0 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel2
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel2
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel2
subjects:
- kind: ServiceAccount
name: flannel2
namespace: kube-system
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: flannel2
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel2-cfg
namespace: kube-system
labels:
tier: node
app: flannel2
data:
flannel2-conf.json: |
{
"type": "flannel",
"name": "flannel-2",
"subnetFile": "/run/flannel/flannel2.env",
"dataDir": "/var/lib/cni/flannel2",
"delegate": {
"bridge": "kbr1"
}
}
net-conf.json: |
{
"Network": "10.144.0.0/16",
"SubnetLen": 24,
"SubnetMin": "10.144.0.0",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: v1
kind: Pod
metadata:
name: flannel-etcd
namespace: kube-system
spec:
containers:
- command:
- etcd
- --advertise-client-urls=http://10.1.1.1:12379
- --listen-client-urls=http://0.0.0.0:12379
- --listen-peer-urls=http://localhost:12380
image: quay.io/coreos/etcd:latest
name: etcd
hostNetwork: true
nodeName: kube-master
---
apiVersion: batch/v1
kind: Job
metadata:
name: flannel-etcdctl
namespace: kube-system
spec:
template:
spec:
containers:
- name: flannel-etcdctl
image: quay.io/coreos/etcd:latest
command: ["etcdctl"]
args: ["--endpoints=http://10.1.1.1:12379", "set", "/flannel2/network/config", '{ "Network": "10.5.0.0/16", "Backend": {"Type": "vxlan", "VNI": 2}}']
hostNetwork: true
nodeName: kube-master
restartPolicy: Never
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-flannel2-ds
namespace: kube-system
labels:
tier: node
app: flannel2
spec:
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
tier: node
app: flannel2
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
serviceAccountName: flannel2
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.10.0-amd64
command:
- cp
args:
- -f
- /etc/kube-flannel/flannel2-conf.json
- /etc/cni/multus/net.d/10-flannel.conf
volumeMounts:
- name: cni
mountPath: /etc/cni/multus/net.d
- name: flannel2-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel2
image: quay.io/coreos/flannel:v0.10.0-amd64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --etcd-endpoints=http://10.1.1.1:12379
- -iface=eth1
- -subnet-file=/run/flannel/flannel2.env
- -etcd-prefix=/flannel2/network
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/multus/net.d
- name: flannel2-cfg
configMap:
name: kube-flannel2-cfg
---
apiVersion: "kubernetes.cni.cncf.io/v1"
kind: Network
metadata:
name: flannel-2

View File

@@ -1,30 +0,0 @@
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: ptp-tuning-conflist
spec:
config: '{
"cniVersion": "0.3.1",
"name": "ptp-tuning-conflist",
"plugins": [{
"type": "ptp",
"ipMasq": true,
"mtu": 512,
"ipam": {
"type": "host-local",
"subnet": "172.16.0.0/24"
},
"dns": {
"nameservers": ["172.16.1.1"]
}
},
{
"name": "mytuning",
"type": "tuning",
"sysctl": {
"net.core.somaxconn": "500"
}
}
]
}'

View File

@@ -1,13 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: pod-case-01
annotations:
k8s.v1.cni.cncf.io/networks: macvlan-conf-1
spec:
containers:
- name: pod-case-01
image: docker.io/centos/tools:latest
command:
- /sbin/init

View File

@@ -1,18 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: pod-case-02
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "macvlan-conf-2" },
{ "name": "vlan-conf-1-1",
"namespace": "testns1",
"interface": "vlan1-1" }
]'
spec:
containers:
- name: pod-case-02
image: docker.io/centos/tools:latest
command:
- /sbin/init

View File

@@ -1,17 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: pod-case-03
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "macvlan-conf-3" },
{ "name": "macvlan-conf-4" },
{ "name": "flannel-2" }
]'
spec:
containers:
- name: pod-case-03
image: docker.io/centos/tools:latest
command:
- /sbin/init

View File

@@ -1,11 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: pod-case-04
spec:
containers:
- name: pod-case-04
image: docker.io/centos/tools:latest
command:
- /sbin/init

View File

@@ -1,15 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: pod-case-05
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "ptp-tuning-conflist" }
]'
spec:
containers:
- name: pod-case-05
image: docker.io/centos/tools:latest
command:
- /sbin/init

View File

@@ -1,14 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: samplepod
annotations:
k8s.v1.cni.cncf.io/networks: macvlan-conf
spec:
containers:
- name: samplepod
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: dougbtv/centos-network
ports:
- containerPort: 80

View File

@@ -1,21 +0,0 @@
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: sriov-net-a
annotations:
k8s.v1.cni.cncf.io/resourceName: intel.com/sriov
spec:
config: '{
"type": "sriov",
"vlan": 1000,
"ipam": {
"type": "host-local",
"subnet": "10.56.217.0/24",
"rangeStart": "10.56.217.171",
"rangeEnd": "10.56.217.181",
"routes": [{
"dst": "0.0.0.0/0"
}],
"gateway": "10.56.217.1"
}
}'

47
examples/sriov-pod.yml Normal file
View File

@@ -0,0 +1,47 @@
# This net-attach-def defines SR-IOV CNI config
# Please see https://github.com/intel/sriov-cni and https://github.com/intel/sriov-network-device-plugin
# for its detail.
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: sriov-net-a
annotations:
k8s.v1.cni.cncf.io/resourceName: intel.com/sriov
spec:
config: '{
"type": "sriov",
"vlan": 1000,
"ipam": {
"type": "host-local",
"subnet": "10.56.217.0/24",
"rangeStart": "10.56.217.171",
"rangeEnd": "10.56.217.181",
"routes": [{
"dst": "0.0.0.0/0"
}],
"gateway": "10.56.217.1"
}
}'
---
apiVersion: v1
kind: Pod
metadata:
name: testpod1
labels:
env: test
annotations:
k8s.v1.cni.cncf.io/networks: sriov-net-a
spec:
containers:
- name: appcntr1
image: centos/tools
imagePullPolicy: IfNotPresent
command: [ "/bin/bash", "-c", "--" ]
args: [ "while true; do sleep 300000; done;" ]
resources:
requests:
intel.com/sriov: '1'
limits:
intel.com/sriov: '1'
restartPolicy: "Never"

85
go.mod
View File

@@ -1,42 +1,51 @@
module github.com/intel/multus-cni
module gopkg.in/k8snetworkplumbingwg/multus-cni.v3
go 1.12
go 1.16
require (
github.com/Microsoft/go-winio v0.4.14 // indirect
github.com/containernetworking/cni v0.7.1
github.com/containernetworking/plugins v0.8.2
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 // indirect
github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e // indirect
github.com/golang/protobuf v1.3.2 // indirect
github.com/google/btree v0.0.0-20160524151835-7d79101e329e // indirect
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 // indirect
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d // indirect
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7 // indirect
github.com/imdario/mergo v0.0.0-20141206190957-6633656539c1 // indirect
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da // indirect
github.com/onsi/ginkgo v1.9.0
github.com/onsi/gomega v1.6.0
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.8.1
github.com/spf13/pflag v1.0.1 // indirect
github.com/stretchr/objx v0.2.0 // indirect
github.com/stretchr/testify v1.4.0 // indirect
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf
github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f // indirect
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 // indirect
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7
golang.org/x/sys v0.0.0-20190825160603-fb81701db80f // indirect
golang.org/x/text v0.3.2 // indirect
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d // indirect
google.golang.org/grpc v1.23.0
gopkg.in/inf.v0 v0.9.0 // indirect
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 // indirect
k8s.io/api v0.0.0-20180712090710-2d6f90ab1293
k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d
k8s.io/client-go v0.0.0-20180718001006-59698c7d9724
k8s.io/klog v0.0.0-20181108234604-8139d8cb77af // indirect
k8s.io/kubernetes v1.13.0
github.com/containernetworking/cni v0.8.1
github.com/containernetworking/plugins v0.9.1
github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.1.1-0.20210510153419-66a699ae3b05
github.com/onsi/ginkgo v1.12.1
github.com/onsi/gomega v1.10.3
github.com/pkg/errors v0.9.1
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
google.golang.org/grpc v1.27.1
gopkg.in/natefinch/lumberjack.v2 v2.0.0
k8s.io/api v0.20.10
k8s.io/apimachinery v0.20.10
k8s.io/client-go v0.20.10
k8s.io/klog v1.0.0
k8s.io/kubelet v0.0.0
k8s.io/kubernetes v1.20.10
)
replace (
github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2
k8s.io/api => k8s.io/api v0.20.10
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.10
k8s.io/apimachinery => k8s.io/apimachinery v0.20.10
k8s.io/apiserver => k8s.io/apiserver v0.20.10
k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.10
k8s.io/client-go => k8s.io/client-go v0.20.10
k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.10
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.10
k8s.io/code-generator => k8s.io/code-generator v0.20.10
k8s.io/component-base => k8s.io/component-base v0.20.10
k8s.io/component-helpers => k8s.io/component-helpers v0.20.10
k8s.io/controller-manager => k8s.io/controller-manager v0.20.10
k8s.io/cri-api => k8s.io/cri-api v0.20.10
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.10
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.10
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.10
k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.10
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.10
k8s.io/kubectl => k8s.io/kubectl v0.20.10
k8s.io/kubelet => k8s.io/kubelet v0.20.10
k8s.io/kubernetes => k8s.io/kubernetes v1.20.10
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.10
k8s.io/metrics => k8s.io/metrics v0.20.10
k8s.io/mount-utils => k8s.io/mount-utils v0.20.10
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.10
)

1121
go.sum

File diff suppressed because it is too large

View File

@@ -9,16 +9,18 @@ fi
# Add version/commit/date into binary
# In case of TravisCI, need to check error code of 'git describe'.
set +e
git describe --tags --abbrev=0 > /dev/null 2>&1
if [ "$?" != "0" ]; then
VERSION="master"
else
VERSION=$(git describe --tags --abbrev=0)
if [ -z "$VERSION" ]; then
set +e
git describe --tags --abbrev=0 > /dev/null 2>&1
if [ "$?" != "0" ]; then
VERSION="master"
else
VERSION=$(git describe --tags --abbrev=0)
fi
set -e
fi
set -e
DATE=$(date --iso-8601=seconds)
COMMIT=$(git rev-parse --verify HEAD)
DATE=$(date -u -d "@${SOURCE_DATE_EPOCH:-$(date +%s)}" --iso-8601=seconds)
COMMIT=${COMMIT:-$(git rev-parse --verify HEAD)}
LDFLAGS="-X main.version=${VERSION:-master} -X main.commit=${COMMIT} -X main.date=${DATE}"
export CGO_ENABLED=0
@@ -27,8 +29,8 @@ if [ "$GO111MODULE" == "off" ]; then
echo "Building plugin without go module"
echo "Warning: this will be deprecated in near future so please use go modules!"
ORG_PATH="github.com/intel"
REPO_PATH="${ORG_PATH}/multus-cni"
ORG_PATH="gopkg.in/k8snetworkplumbingwg"
REPO_PATH="${ORG_PATH}/multus-cni.v3"
if [ ! -h gopath/src/${REPO_PATH} ]; then
mkdir -p gopath/src/${ORG_PATH}
@@ -38,11 +40,15 @@ if [ "$GO111MODULE" == "off" ]; then
export GO15VENDOREXPERIMENT=1
export GOBIN=${PWD}/bin
export GOPATH=${PWD}/gopath
go install -tags no_openssl -ldflags "${LDFLAGS}" "$@" ${REPO_PATH}/multus
go build -o ${PWD}/bin/multus -tags no_openssl -ldflags "${LDFLAGS}" "$@" ${REPO_PATH}/cmd
else
# build with go modules
export GO111MODULE=on
BUILD_ARGS=(-o ${DEST_DIR}/multus -tags no_openssl)
if [ -n "$MODMODE" ]; then
BUILD_ARGS+=(-mod "$MODMODE")
fi
echo "Building plugins"
go build -o ${DEST_DIR}/multus -tags no_openssl -ldflags "${LDFLAGS}" "$@" ./multus
go build ${BUILD_ARGS[*]} -ldflags "${LDFLAGS}" "$@" ./cmd
fi

View File

@@ -5,7 +5,7 @@ set -e
if [ "$GO111MODULE" == "off" ]; then
echo "Warning: this will be deprecated in near future so please use go modules!"
ORG_PATH="github.com/intel"
ORG_PATH="gopkg.in/k8snetworkplumbingwg"
REPO_PATH="${ORG_PATH}/multus-cni"
if [ ! -h gopath/src/${REPO_PATH} ]; then

View File

@@ -65,4 +65,4 @@ Example docker run command:
$ docker run -it -v /opt/cni/bin/:/host/opt/cni/bin/ -v /etc/cni/net.d/:/host/etc/cni/net.d/ --entrypoint=/bin/bash dougbtv/multus
```
Originally inspired by and is a portmanteau of the [Flannel daemonset](https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml), the [Calico Daemonset](https://github.com/projectcalico/calico/blob/master/v2.0/getting-started/kubernetes/installation/hosted/k8s-backend-addon-manager/calico-daemonset.yaml), and the [Calico CNI install bash script](https://github.com/projectcalico/cni-plugin/blob/be4df4db2e47aa7378b1bdf6933724bac1f348d0/k8s-install/scripts/install-cni.sh#L104-L153).
Originally inspired by and is a portmanteau of the [Flannel daemonset](https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml), the [Calico Daemonset](https://docs.projectcalico.org/manifests/calico.yaml), and the [Calico CNI install bash script](https://github.com/projectcalico/cni-plugin/blob/be4df4db2e47aa7378b1bdf6933724bac1f348d0/k8s-install/scripts/install-cni.sh#L104-L153).

View File

@@ -5,7 +5,6 @@ metadata:
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
group: k8s.cni.cncf.io
version: v1
scope: Namespaced
names:
plural: network-attachment-definitions
@@ -13,13 +12,24 @@ spec:
kind: NetworkAttachmentDefinition
shortNames:
- net-attach-def
validation:
openAPIV3Schema:
properties:
spec:
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
Working Group to express the intent for attaching pods to one or more logical or physical
networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
type: object
properties:
config:
type: string
spec:
description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
type: object
properties:
config:
description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
type: string
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -39,6 +49,15 @@ rules:
verbs:
- get
- update
- apiGroups:
- ""
- events.k8s.io
resources:
- events
verbs:
- create
- patch
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -135,7 +154,7 @@ spec:
containers:
- name: kube-multus
# crio support requires multus:latest for now. support 3.3 or later.
image: nfvpe/multus:latest
image: nfvpe/multus:v3.6
command: ["/entrypoint.sh"]
args:
- "--cni-bin-dir=/host/usr/libexec/cni"

View File

@@ -0,0 +1,232 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
group: k8s.cni.cncf.io
version: v1
scope: Namespaced
names:
plural: network-attachment-definitions
singular: network-attachment-definition
kind: NetworkAttachmentDefinition
shortNames:
- net-attach-def
validation:
openAPIV3Schema:
properties:
spec:
properties:
config:
type: string
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: multus
rules:
- apiGroups: ["k8s.cni.cncf.io"]
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
- pods/status
verbs:
- get
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: multus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: multus
subjects:
- kind: ServiceAccount
name: multus
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: multus
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: multus-cni-config
namespace: kube-system
labels:
tier: node
app: multus
data:
# NOTE: If you'd prefer to manually apply a configuration file, you may create one here.
# In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod
# change the "args" line below from
# - "--multus-conf-file=auto"
# to:
# "--multus-conf-file=/tmp/multus-conf/70-multus.conf"
# Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the
# /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet.
cni-conf.json: |
{
"name": "multus-cni-network",
"type": "multus",
"capabilities": {
"portMappings": true
},
"delegates": [
{
"cniVersion": "0.3.1",
"name": "default-cni-network",
"plugins": [
{
"type": "flannel",
"name": "flannel.1",
"delegate": {
"isDefaultGateway": true,
"hairpinMode": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
],
"kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-multus-ds-amd64
namespace: kube-system
labels:
tier: node
app: multus
spec:
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
tier: node
app: multus
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: multus
containers:
- name: kube-multus
image: nfvpe/multus:v3.6
command: ["/entrypoint.sh"]
args:
- "--multus-conf-file=auto"
- "--cni-bin-dir=/host/home/kubernetes/bin"
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: true
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: cnibin
mountPath: /host/home/kubernetes/bin
- name: multus-cfg
mountPath: /tmp/multus-conf
volumes:
- name: cni
hostPath:
path: /etc/cni/net.d
- name: cnibin
hostPath:
path: /home/kubernetes/bin
- name: multus-cfg
configMap:
name: multus-cni-config
items:
- key: cni-conf.json
path: 70-multus.conf
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-multus-ds-ppc64le
namespace: kube-system
labels:
tier: node
app: multus
spec:
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
tier: node
app: multus
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: ppc64le
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: multus
containers:
- name: kube-multus
# ppc64le support requires multus:latest for now. support 3.3 or later.
image: nfvpe/multus:latest-ppc64le
command: ["/entrypoint.sh"]
args:
- "--multus-conf-file=auto"
- "--cni-bin-dir=/host/home/kubernetes/bin"
resources:
requests:
cpu: "100m"
memory: "90Mi"
limits:
cpu: "100m"
memory: "90Mi"
securityContext:
privileged: true
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: cnibin
mountPath: /host/home/kubernetes/bin
- name: multus-cfg
mountPath: /tmp/multus-conf
volumes:
- name: cni
hostPath:
path: /etc/cni/net.d
- name: cnibin
hostPath:
path: /home/kubernetes/bin
- name: multus-cfg
configMap:
name: multus-cni-config
items:
- key: cni-conf.json
path: 70-multus.conf

View File

@@ -5,7 +5,6 @@ metadata:
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
group: k8s.cni.cncf.io
version: v1
scope: Namespaced
names:
plural: network-attachment-definitions
@@ -13,13 +12,24 @@ spec:
kind: NetworkAttachmentDefinition
shortNames:
- net-attach-def
validation:
openAPIV3Schema:
properties:
spec:
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
Working Group to express the intent for attaching pods to one or more logical or physical
networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
type: object
properties:
config:
type: string
spec:
description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
type: object
properties:
config:
description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
type: string
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -39,6 +49,15 @@ rules:
verbs:
- get
- update
- apiGroups:
- ""
- events.k8s.io
resources:
- events
verbs:
- create
- patch
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -134,7 +153,7 @@ spec:
serviceAccountName: multus
containers:
- name: kube-multus
image: nfvpe/multus:v3.2
image: nfvpe/multus:v3.6
command: ["/entrypoint.sh"]
args:
- "--multus-conf-file=auto"

View File

@@ -3,19 +3,12 @@
# Always exit on errors.
set -e
# Run a clean up when we exit if configured to do so.
trap cleanup TERM
function cleanup {
if [ "$MULTUS_CLEANUP_CONFIG_ON_EXIT" == "true" ]; then
CONF=$(cat <<-EOF
{Multus configuration intentionally invalidated to prevent pods from being scheduled.}
EOF
)
echo $CONF > $CNI_CONF_DIR/00-multus.conf
log "Multus configuration intentionally invalidated to prevent pods from being scheduled."
fi
# Trap sigterm
function exitonsigterm() {
echo "Trapped sigterm, exiting."
exit 0
}
trap exitonsigterm SIGTERM
# Set our known directories.
CNI_CONF_DIR="/host/etc/cni/net.d"
@@ -25,14 +18,20 @@ MULTUS_CONF_FILE="/usr/src/multus-cni/images/70-multus.conf"
MULTUS_AUTOCONF_DIR="/host/etc/cni/net.d"
MULTUS_BIN_FILE="/usr/src/multus-cni/bin/multus"
MULTUS_KUBECONFIG_FILE_HOST="/etc/cni/net.d/multus.d/multus.kubeconfig"
MULTUS_TEMP_KUBECONFIG="/tmp/multus.kubeconfig"
MULTUS_MASTER_CNI_FILE_NAME=""
MULTUS_NAMESPACE_ISOLATION=false
MULTUS_GLOBAL_NAMESPACES=""
MULTUS_LOG_TO_STDERR=true
MULTUS_LOG_LEVEL=""
MULTUS_LOG_FILE=""
MULTUS_READINESS_INDICATOR_FILE=""
OVERRIDE_NETWORK_NAME=false
MULTUS_CLEANUP_CONFIG_ON_EXIT=false
RESTART_CRIO=false
CRIO_RESTARTED_ONCE=false
RENAME_SOURCE_CONFIG_FILE=false
SKIP_BINARY_COPY=false
# Give help text for parameters.
function usage()
@@ -42,7 +41,9 @@ function usage()
echo -e "will be copied to the corresponding configuration directory. When "
echo -e "'--multus-conf-file=auto' is used, 00-multus.conf will be automatically "
echo -e "generated from the CNI configuration file of the master plugin (the first file "
echo -e "in lexicographical order in cni-conf-dir)."
echo -e "in lexicographical order in cni-conf-dir). When '--multus-master-cni-file-name'"
echo -e "is used, 00-multus.conf will only be automatically generated from the specific"
echo -e "file rather than the first file."
echo -e ""
echo -e "./entrypoint.sh"
echo -e "\t-h --help"
@@ -51,14 +52,19 @@ function usage()
echo -e "\t--cni-version=<cniVersion (e.g. 0.3.1)>"
echo -e "\t--multus-conf-file=$MULTUS_CONF_FILE"
echo -e "\t--multus-bin-file=$MULTUS_BIN_FILE"
echo -e "\t--skip-multus-binary-copy=$SKIP_BINARY_COPY"
echo -e "\t--multus-kubeconfig-file-host=$MULTUS_KUBECONFIG_FILE_HOST"
echo -e "\t--multus-master-cni-file-name=$MULTUS_MASTER_CNI_FILE_NAME (empty by default, example: 10-calico.conflist)"
echo -e "\t--namespace-isolation=$MULTUS_NAMESPACE_ISOLATION"
echo -e "\t--global-namespaces=$MULTUS_GLOBAL_NAMESPACES (used only with --namespace-isolation=true)"
echo -e "\t--multus-autoconfig-dir=$MULTUS_AUTOCONF_DIR (used only with --multus-conf-file=auto)"
echo -e "\t--multus-log-to-stderr=$MULTUS_LOG_TO_STDERR (empty by default, used only with --multus-conf-file=auto)"
echo -e "\t--multus-log-level=$MULTUS_LOG_LEVEL (empty by default, used only with --multus-conf-file=auto)"
echo -e "\t--multus-log-file=$MULTUS_LOG_FILE (empty by default, used only with --multus-conf-file=auto)"
echo -e "\t--override-network-name=false (used only with --multus-conf-file=auto)"
echo -e "\t--cleanup-config-on-exit=false (used only with --multus-conf-file=auto)"
echo -e "\t--rename-conf-file=false (used only with --multus-conf-file=auto)"
echo -e "\t--readiness-indicator-file=$MULTUS_READINESS_INDICATOR_FILE (used only with --multus-conf-file=auto)"
echo -e "\t--additional-bin-dir=$ADDITIONAL_BIN_DIR (adds binDir option to configuration, used only with --multus-conf-file=auto)"
echo -e "\t--restart-crio=false (restarts CRIO after config file is generated)"
}
@@ -78,6 +84,10 @@ function warn()
log "WARN: {$1}"
}
if ! type python3 &> /dev/null; then
alias python=python3
fi
# Parse parameters given as arguments to this script.
while [ "$1" != "" ]; do
PARAM=`echo $1 | awk -F= '{print $1}'`
@@ -105,9 +115,18 @@ while [ "$1" != "" ]; do
--multus-kubeconfig-file-host)
MULTUS_KUBECONFIG_FILE_HOST=$VALUE
;;
--multus-master-cni-file-name)
MULTUS_MASTER_CNI_FILE_NAME=$VALUE
;;
--namespace-isolation)
MULTUS_NAMESPACE_ISOLATION=$VALUE
;;
--global-namespaces)
MULTUS_GLOBAL_NAMESPACES=$VALUE
;;
--multus-log-to-stderr)
MULTUS_LOG_TO_STDERR=$VALUE
;;
--multus-log-level)
MULTUS_LOG_LEVEL=$VALUE
;;
@@ -132,6 +151,12 @@ while [ "$1" != "" ]; do
--additional-bin-dir)
ADDITIONAL_BIN_DIR=$VALUE
;;
--skip-multus-binary-copy)
SKIP_BINARY_COPY=$VALUE
;;
--readiness-indicator-file)
MULTUS_READINESS_INDICATOR_FILE=$VALUE
;;
*)
warn "unknown parameter \"$PARAM\""
;;
@@ -157,8 +182,13 @@ do
done
# Copy files into place and atomically move into final binary name
cp -f $MULTUS_BIN_FILE $CNI_BIN_DIR/_multus
mv -f $CNI_BIN_DIR/_multus $CNI_BIN_DIR/multus
if [ "$SKIP_BINARY_COPY" = false ]; then
cp -f $MULTUS_BIN_FILE $CNI_BIN_DIR/_multus
mv -f $CNI_BIN_DIR/_multus $CNI_BIN_DIR/multus
else
log "Entrypoint skipped copying Multus binary."
fi
if [ "$MULTUS_CONF_FILE" != "auto" ]; then
cp -f $MULTUS_CONF_FILE $CNI_CONF_DIR
fi
@@ -196,16 +226,17 @@ if [ -f "$SERVICE_ACCOUNT_PATH/token" ]; then
# to skip TLS verification for now. We should eventually support
# writing more complete kubeconfig files. This is only used
# if the provided CNI network config references it.
touch $MULTUS_KUBECONFIG
chmod ${KUBECONFIG_MODE:-600} $MULTUS_KUBECONFIG
cat > $MULTUS_KUBECONFIG <<EOF
touch $MULTUS_TEMP_KUBECONFIG
chmod ${KUBECONFIG_MODE:-600} $MULTUS_TEMP_KUBECONFIG
# Write the kubeconfig to a temp file first.
cat > $MULTUS_TEMP_KUBECONFIG <<EOF
# Kubeconfig file for Multus CNI plugin.
apiVersion: v1
kind: Config
clusters:
- name: local
cluster:
server: ${KUBERNETES_SERVICE_PROTOCOL:-https}://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}
server: ${KUBERNETES_SERVICE_PROTOCOL:-https}://[${KUBERNETES_SERVICE_HOST}]:${KUBERNETES_SERVICE_PORT}
$TLS_CFG
users:
- name: multus
@@ -219,6 +250,9 @@ contexts:
current-context: multus-context
EOF
# Atomically move the temp kubeconfig to its permanent home.
mv -f $MULTUS_TEMP_KUBECONFIG $MULTUS_KUBECONFIG
else
warn "Doesn't look like we're running in a kubernetes environment (no serviceaccount token)"
fi
@@ -233,26 +267,24 @@ if [ "$MULTUS_CONF_FILE" == "auto" ]; then
found_master=false
tries=0
while [ $found_master == false ]; do
MASTER_PLUGIN="$(ls $MULTUS_AUTOCONF_DIR | grep -E '\.conf(list)?$' | grep -Ev '00-multus\.conf' | head -1)"
if [ "$MULTUS_MASTER_CNI_FILE_NAME" != "" ]; then
MASTER_PLUGIN="$MULTUS_MASTER_CNI_FILE_NAME"
else
MASTER_PLUGIN="$(ls $MULTUS_AUTOCONF_DIR | grep -E '\.conf(list)?$' | grep -Ev '00-multus\.conf' | head -1)"
fi
if [ "$MASTER_PLUGIN" == "" ]; then
if [ $tries -lt 600 ]; then
if ! (($tries % 5)); then
log "Attemping to find master plugin configuration, attempt $tries"
log "Attempting to find master plugin configuration, attempt $tries"
fi
let "tries+=1"
# See if the Multus configuration file exists, if it does then clean it up.
if [ "$MULTUS_CLEANUP_CONFIG_ON_EXIT" == true ] && [ -f "$CNI_CONF_DIR/00-multus.conf" ]; then
# But first, check if it has the invalidated configuration in it (otherwise we keep doing this over and over.)
if ! grep -q "invalidated" $CNI_CONF_DIR/00-multus.conf; then
cleanup
fi
fi
sleep 1;
else
error "Multus could not be configured: no master plugin was found."
exit 1;
fi
else
log "Using MASTER_PLUGIN: $MASTER_PLUGIN"
found_master=true
@@ -261,6 +293,17 @@ if [ "$MULTUS_CONF_FILE" == "auto" ]; then
ISOLATION_STRING="\"namespaceIsolation\": true,"
fi
GLOBAL_NAMESPACES_STRING=""
if [ ! -z "${MULTUS_GLOBAL_NAMESPACES// }" ]; then
GLOBAL_NAMESPACES_STRING="\"globalNamespaces\": \"$MULTUS_GLOBAL_NAMESPACES\","
fi
LOG_TO_STDERR_STRING=""
if [ "$MULTUS_LOG_TO_STDERR" == false ]; then
LOG_TO_STDERR_STRING="\"logToStderr\": false,"
fi
LOG_LEVEL_STRING=""
if [ ! -z "${MULTUS_LOG_LEVEL// }" ]; then
case "$MULTUS_LOG_LEVEL" in
@@ -295,13 +338,40 @@ if [ "$MULTUS_CONF_FILE" == "auto" ]; then
ADDITIONAL_BIN_DIR_STRING="\"binDir\": \"$ADDITIONAL_BIN_DIR\","
fi
READINESS_INDICATOR_FILE_STRING=""
if [ ! -z "${MULTUS_READINESS_INDICATOR_FILE// }" ]; then
READINESS_INDICATOR_FILE_STRING="\"readinessindicatorfile\": \"$MULTUS_READINESS_INDICATOR_FILE\","
fi
if [ "$OVERRIDE_NETWORK_NAME" == "true" ]; then
MASTER_PLUGIN_NET_NAME="$(cat $MULTUS_AUTOCONF_DIR/$MASTER_PLUGIN | \
python -c 'import json,sys;print json.load(sys.stdin)["name"]')"
python -c 'import json,sys;print(json.load(sys.stdin)["name"])')"
else
MASTER_PLUGIN_NET_NAME="multus-cni-network"
fi
capabilities_python_filter_tmpfile=$(mktemp)
cat << EOF > $capabilities_python_filter_tmpfile
import json,sys
conf = json.load(sys.stdin)
capabilities = {}
if 'plugins' in conf:
for capa in [p['capabilities'] for p in conf['plugins'] if 'capabilities' in p]:
capabilities.update({capability:enabled for (capability,enabled) in capa.items() if enabled})
elif 'capabilities' in conf:
capabilities.update({capability:enabled for (capability,enabled) in conf['capabilities'].items() if enabled})
if len(capabilities) > 0:
print("""\"capabilities\": """ + json.dumps(capabilities) + ",")
else:
print("")
EOF
NESTED_CAPABILITIES_STRING="$(cat $MULTUS_AUTOCONF_DIR/$MASTER_PLUGIN | \
python $capabilities_python_filter_tmpfile)"
rm $capabilities_python_filter_tmpfile
log "Nested capabilities string: $NESTED_CAPABILITIES_STRING"
MASTER_PLUGIN_LOCATION=$MULTUS_AUTOCONF_DIR/$MASTER_PLUGIN
MASTER_PLUGIN_JSON="$(cat $MASTER_PLUGIN_LOCATION)"
log "Using $MASTER_PLUGIN_LOCATION as a source to generate the Multus configuration"
@@ -310,10 +380,14 @@ if [ "$MULTUS_CONF_FILE" == "auto" ]; then
$CNI_VERSION_STRING
"name": "$MASTER_PLUGIN_NET_NAME",
"type": "multus",
$NESTED_CAPABILITIES_STRING
$ISOLATION_STRING
$GLOBAL_NAMESPACES_STRING
$LOG_TO_STDERR_STRING
$LOG_LEVEL_STRING
$LOG_FILE_STRING
$ADDITIONAL_BIN_DIR_STRING
$READINESS_INDICATOR_FILE_STRING
"kubeconfig": "$MULTUS_KUBECONFIG_FILE_HOST",
"delegates": [
$MASTER_PLUGIN_JSON
@@ -321,7 +395,9 @@ if [ "$MULTUS_CONF_FILE" == "auto" ]; then
}
EOF
)
echo $CONF > $CNI_CONF_DIR/00-multus.conf
tmpfile=$(mktemp)
echo $CONF > $tmpfile
mv $tmpfile $CNI_CONF_DIR/00-multus.conf
log "Config file created @ $CNI_CONF_DIR/00-multus.conf"
echo $CONF
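Instead of writing 00-multus.conf directly, the generated configuration now goes to a mktemp file first and is then moved into $CNI_CONF_DIR, so the kubelet never observes a half-written file. A minimal sketch of the same write-then-rename idea in Go (hypothetical helper; it creates the temporary file next to the destination so the final rename stays on one filesystem):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// writeFileAtomic writes data to a temporary file in the destination's
// directory and renames it into place.
func writeFileAtomic(dest string, data []byte, mode os.FileMode) error {
	tmp, err := os.CreateTemp(filepath.Dir(dest), ".00-multus-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // best-effort cleanup if the rename never happens
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(mode); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), dest)
}

func main() {
	fmt.Println(writeFileAtomic("/etc/cni/net.d/00-multus.conf", []byte(`{"type":"multus"}`), 0600))
}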
@@ -353,8 +429,17 @@ if [ "$MULTUS_CLEANUP_CONFIG_ON_EXIT" == true ]; then
while true; do
# Check and see if the original master plugin configuration exists...
if [ ! -f "$MASTER_PLUGIN_LOCATION" ]; then
log "Master plugin @ $MASTER_PLUGIN_LOCATION has been deleted. Performing cleanup..."
cleanup
log "Master plugin @ $MASTER_PLUGIN_LOCATION has been deleted. Allowing 45 seconds for its restoration..."
sleep 10
for i in {1..35}
do
if [ -f "$MASTER_PLUGIN_LOCATION" ]; then
log "Master plugin @ $MASTER_PLUGIN_LOCATION was restored. Regenerating given configuration."
break
fi
sleep 1
done
generateMultusConf
log "Continuing watch loop after configuration regeneration..."
fi
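When cleanup-on-exit is enabled, the watch loop no longer tears the configuration down the moment the master plugin file disappears; it now waits roughly 45 seconds (an initial 10-second sleep plus 35 one-second polls) for the file to be restored and then regenerates the Multus configuration. A small Go sketch of that grace-period wait (the path is just an example):

package main

import (
	"fmt"
	"os"
	"time"
)

// waitForRestore polls for path to reappear until the grace period expires.
func waitForRestore(path string, grace time.Duration) bool {
	deadline := time.Now().Add(grace)
	for time.Now().Before(deadline) {
		if _, err := os.Stat(path); err == nil {
			return true
		}
		time.Sleep(time.Second)
	}
	return false
}

func main() {
	if waitForRestore("/host/etc/cni/net.d/10-flannel.conflist", 45*time.Second) {
		fmt.Println("master plugin restored; regenerating the Multus configuration")
	} else {
		fmt.Println("master plugin still missing after the grace period; regenerating anyway")
	}
}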
@@ -362,5 +447,9 @@ if [ "$MULTUS_CLEANUP_CONFIG_ON_EXIT" == true ]; then
done
else
log "Entering sleep (success)..."
sleep infinity
fi
if tty -s; then
read
else
sleep infinity
fi
fi


@@ -18,12 +18,30 @@ spec:
storage: true
schema:
openAPIV3Schema:
description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
Working Group to express the intent for attaching pods to one or more logical or physical
networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
type: object
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the
latest internal value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
type: object
properties:
config:
description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
type: string
---
kind: ClusterRole
@@ -44,6 +62,15 @@ rules:
verbs:
- get
- update
- apiGroups:
- ""
- events.k8s.io
resources:
- events
verbs:
- create
- patch
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -116,7 +143,7 @@ data:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-multus-ds-amd64
name: kube-multus-ds
namespace: kube-system
labels:
tier: node
@@ -136,8 +163,6 @@ spec:
name: multus
spec:
hostNetwork: true
nodeSelector:
kubernetes.io/arch: amd64
tolerations:
- operator: Exists
effect: NoSchedule
@@ -145,7 +170,7 @@ spec:
containers:
- name: kube-multus
# crio support requires multus:latest for now. support 3.3 or later.
image: nfvpe/multus:latest
image: ghcr.io/k8snetworkplumbingwg/multus-cni:stable
command: ["/entrypoint.sh"]
args:
- "--cni-version=0.3.1"
@@ -172,6 +197,7 @@ spec:
mountPath: /host/usr/libexec/cni
- name: multus-cfg
mountPath: /tmp/multus-conf
terminationGracePeriodSeconds: 10
volumes:
- name: run
hostPath:
@@ -188,72 +214,3 @@ spec:
items:
- key: cni-conf.json
path: 70-multus.conf
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-multus-ds-ppc64le
namespace: kube-system
labels:
tier: node
app: multus
name: multus
spec:
selector:
matchLabels:
name: multus
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
tier: node
app: multus
name: multus
spec:
hostNetwork: true
nodeSelector:
kubernetes.io/arch: ppc64le
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: multus
containers:
- name: kube-multus
# crio support requires multus:latest for now. support 3.3 or later.
image: nfvpe/multus:latest-ppc64le
command: ["/entrypoint.sh"]
args:
- "--cni-version=0.3.1"
- "--cni-bin-dir=/host/usr/libexec/cni"
- "--multus-conf-file=auto"
- "--restart-crio=true"
resources:
requests:
cpu: "100m"
memory: "90Mi"
limits:
cpu: "100m"
memory: "90Mi"
securityContext:
privileged: true
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: cnibin
mountPath: /host/usr/libexec/cni
- name: multus-cfg
mountPath: /tmp/multus-conf
volumes:
- name: cni
hostPath:
path: /etc/cni/net.d
- name: cnibin
hostPath:
path: /usr/libexec/cni
- name: multus-cfg
configMap:
name: multus-cni-config
items:
- key: cni-conf.json
path: 70-multus.conf


@@ -0,0 +1,179 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: network-attachment-definitions.k8s.cni.cncf.io
spec:
group: k8s.cni.cncf.io
scope: Namespaced
names:
plural: network-attachment-definitions
singular: network-attachment-definition
kind: NetworkAttachmentDefinition
shortNames:
- net-attach-def
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
config:
type: string
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: multus
rules:
- apiGroups: ["k8s.cni.cncf.io"]
resources:
- '*'
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
- pods/status
verbs:
- get
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: multus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: multus
subjects:
- kind: ServiceAccount
name: multus
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: multus
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: multus-cni-config
namespace: kube-system
labels:
tier: node
app: multus
data:
# NOTE: If you'd prefer to manually apply a configuration file, you may create one here.
# In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod:
# change the "args" line below from
# - "--multus-conf-file=auto"
# to:
# "--multus-conf-file=/tmp/multus-conf/70-multus.conf"
# Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the
# /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet.
cni-conf.json: |
{
"name": "multus-cni-network",
"type": "multus",
"capabilities": {
"portMappings": true
},
"delegates": [
{
"cniVersion": "0.3.1",
"name": "default-cni-network",
"plugins": [
{
"type": "flannel",
"name": "flannel.1",
"delegate": {
"isDefaultGateway": true,
"hairpinMode": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
],
"kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-multus-ds
namespace: kube-system
labels:
tier: node
app: multus
name: multus
spec:
selector:
matchLabels:
name: multus
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
tier: node
app: multus
name: multus
spec:
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: multus
containers:
- name: kube-multus
image: ghcr.io/k8snetworkplumbingwg/multus-cni:stable
command: ["/entrypoint.sh"]
args:
- "--multus-conf-file=auto"
- "--cni-version=0.3.1"
- "--cni-bin-dir=/host/home/kubernetes/bin"
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: true
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: cnibin
mountPath: /host/home/kubernetes/bin
- name: multus-cfg
mountPath: /tmp/multus-conf
volumes:
- name: cni
hostPath:
path: /etc/cni/net.d
- name: cnibin
hostPath:
path: /home/kubernetes/bin
- name: multus-cfg
configMap:
name: multus-cni-config
items:
- key: cni-conf.json
path: 70-multus.conf


@@ -18,12 +18,30 @@ spec:
storage: true
schema:
openAPIV3Schema:
description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
Working Group to express the intent for attaching pods to one or more logical or physical
networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
type: object
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the
latest internal value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
type: object
properties:
config:
description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
type: string
---
kind: ClusterRole
@@ -44,6 +62,15 @@ rules:
verbs:
- get
- update
- apiGroups:
- ""
- events.k8s.io
resources:
- events
verbs:
- create
- patch
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -116,7 +143,7 @@ data:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-multus-ds-amd64
name: kube-multus-ds
namespace: kube-system
labels:
tier: node
@@ -136,15 +163,13 @@ spec:
name: multus
spec:
hostNetwork: true
nodeSelector:
kubernetes.io/arch: amd64
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: multus
containers:
- name: kube-multus
image: nfvpe/multus:v3.3
image: ghcr.io/k8snetworkplumbingwg/multus-cni:stable
command: ["/entrypoint.sh"]
args:
- "--multus-conf-file=auto"
@@ -165,73 +190,7 @@ spec:
mountPath: /host/opt/cni/bin
- name: multus-cfg
mountPath: /tmp/multus-conf
volumes:
- name: cni
hostPath:
path: /etc/cni/net.d
- name: cnibin
hostPath:
path: /opt/cni/bin
- name: multus-cfg
configMap:
name: multus-cni-config
items:
- key: cni-conf.json
path: 70-multus.conf
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-multus-ds-ppc64le
namespace: kube-system
labels:
tier: node
app: multus
name: multus
spec:
selector:
matchLabels:
name: multus
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
tier: node
app: multus
name: multus
spec:
hostNetwork: true
nodeSelector:
kubernetes.io/arch: ppc64le
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: multus
containers:
- name: kube-multus
# ppc64le support requires multus:latest for now. support 3.3 or later.
image: nfvpe/multus:latest-ppc64le
command: ["/entrypoint.sh"]
args:
- "--multus-conf-file=auto"
- "--cni-version=0.3.1"
resources:
requests:
cpu: "100m"
memory: "90Mi"
limits:
cpu: "100m"
memory: "90Mi"
securityContext:
privileged: true
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: cnibin
mountPath: /host/opt/cni/bin
- name: multus-cfg
mountPath: /tmp/multus-conf
terminationGracePeriodSeconds: 10
volumes:
- name: cni
hostPath:


@@ -1,710 +0,0 @@
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8sclient
import (
"encoding/json"
"fmt"
"os"
"regexp"
"strings"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/skel"
cnitypes "github.com/containernetworking/cni/pkg/types"
"github.com/intel/multus-cni/kubeletclient"
"github.com/intel/multus-cni/logging"
"github.com/intel/multus-cni/types"
)
const (
resourceNameAnnot = "k8s.v1.cni.cncf.io/resourceName"
defaultNetAnnot = "v1.multus-cni.io/default-network"
networkAttachmentAnnot = "k8s.v1.cni.cncf.io/networks"
)
// NoK8sNetworkError indicates error, no network in kubernetes
type NoK8sNetworkError struct {
message string
}
// ClientInfo contains information given from k8s client
type ClientInfo struct {
Client KubeClient
Podnamespace string
Podname string
}
func (e *NoK8sNetworkError) Error() string { return string(e.message) }
type defaultKubeClient struct {
client kubernetes.Interface
}
// defaultKubeClient implements KubeClient
var _ KubeClient = &defaultKubeClient{}
func (d *defaultKubeClient) GetRawWithPath(path string) ([]byte, error) {
return d.client.ExtensionsV1beta1().RESTClient().Get().AbsPath(path).DoRaw()
}
func (d *defaultKubeClient) GetPod(namespace, name string) (*v1.Pod, error) {
return d.client.Core().Pods(namespace).Get(name, metav1.GetOptions{})
}
func (d *defaultKubeClient) UpdatePodStatus(pod *v1.Pod) (*v1.Pod, error) {
return d.client.Core().Pods(pod.Namespace).UpdateStatus(pod)
}
func setKubeClientInfo(c *ClientInfo, client KubeClient, k8sArgs *types.K8sArgs) {
logging.Debugf("setKubeClientInfo: %v, %v, %v", c, client, k8sArgs)
c.Client = client
c.Podnamespace = string(k8sArgs.K8S_POD_NAMESPACE)
c.Podname = string(k8sArgs.K8S_POD_NAME)
}
// SetNetworkStatus sets network status into Pod annotation
func SetNetworkStatus(client KubeClient, k8sArgs *types.K8sArgs, netStatus []*types.NetworkStatus, conf *types.NetConf) error {
logging.Debugf("SetNetworkStatus: %v, %v, %v, %v", client, k8sArgs, netStatus, conf)
client, err := GetK8sClient(conf.Kubeconfig, client)
if err != nil {
return logging.Errorf("SetNetworkStatus: %v", err)
}
if client == nil {
if len(conf.Delegates) == 0 {
// No available kube client and no delegates, we can't do anything
return logging.Errorf("must have either Kubernetes config or delegates, refer to Multus documentation for usage instructions")
}
logging.Debugf("SetNetworkStatus: kube client info is not defined, skip network status setup")
return nil
}
podName := string(k8sArgs.K8S_POD_NAME)
podNamespace := string(k8sArgs.K8S_POD_NAMESPACE)
pod, err := client.GetPod(podNamespace, podName)
if err != nil {
return logging.Errorf("SetNetworkStatus: failed to query the pod %v in out of cluster comm: %v", podName, err)
}
var networkStatuses string
if netStatus != nil {
var networkStatus []string
for _, status := range netStatus {
data, err := json.MarshalIndent(status, "", " ")
if err != nil {
return logging.Errorf("SetNetworkStatus: error with Marshal Indent: %v", err)
}
networkStatus = append(networkStatus, string(data))
}
networkStatuses = fmt.Sprintf("[%s]", strings.Join(networkStatus, ","))
}
_, err = setPodNetworkAnnotation(client, podNamespace, pod, networkStatuses)
if err != nil {
return logging.Errorf("SetNetworkStatus: failed to update the pod %v in out of cluster comm: %v", podName, err)
}
return nil
}
func setPodNetworkAnnotation(client KubeClient, namespace string, pod *v1.Pod, networkstatus string) (*v1.Pod, error) {
logging.Debugf("setPodNetworkAnnotation: %v, %s, %v, %s", client, namespace, pod, networkstatus)
// If the pod's annotations map is empty, make sure it is allocated
if len(pod.Annotations) == 0 {
pod.Annotations = make(map[string]string)
}
pod.Annotations["k8s.v1.cni.cncf.io/networks-status"] = networkstatus
pod = pod.DeepCopy()
var err error
if resultErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
if err != nil {
// Re-get the pod unless it's the first attempt to update
pod, err = client.GetPod(pod.Namespace, pod.Name)
if err != nil {
return err
}
}
pod, err = client.UpdatePodStatus(pod)
return err
}); resultErr != nil {
return nil, logging.Errorf("status update failed for pod %s/%s: %v", pod.Namespace, pod.Name, resultErr)
}
return pod, nil
}
func parsePodNetworkObjectName(podnetwork string) (string, string, string, error) {
var netNsName string
var netIfName string
var networkName string
logging.Debugf("parsePodNetworkObjectName: %s", podnetwork)
slashItems := strings.Split(podnetwork, "/")
if len(slashItems) == 2 {
netNsName = strings.TrimSpace(slashItems[0])
networkName = slashItems[1]
} else if len(slashItems) == 1 {
networkName = slashItems[0]
} else {
return "", "", "", logging.Errorf("Invalid network object (failed at '/')")
}
atItems := strings.Split(networkName, "@")
networkName = strings.TrimSpace(atItems[0])
if len(atItems) == 2 {
netIfName = strings.TrimSpace(atItems[1])
} else if len(atItems) != 1 {
return "", "", "", logging.Errorf("Invalid network object (failed at '@')")
}
// Check and see if each item matches the specification for valid attachment name.
// "Valid attachment names must be comprised of units of the DNS-1123 label format"
// [a-z0-9]([-a-z0-9]*[a-z0-9])?
// And we allow at (@), and forward slash (/) (units separated by commas)
// It must start and end alphanumerically.
allItems := []string{netNsName, networkName, netIfName}
for i := range allItems {
matched, _ := regexp.MatchString("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$", allItems[i])
if !matched && len([]rune(allItems[i])) > 0 {
return "", "", "", logging.Errorf(fmt.Sprintf("Failed to parse: one or more items did not match comma-delimited format (must consist of lower case alphanumeric characters). Must start and end with an alphanumeric character), mismatch @ '%v'", allItems[i]))
}
}
logging.Debugf("parsePodNetworkObjectName: parsed: %s, %s, %s", netNsName, networkName, netIfName)
return netNsName, networkName, netIfName, nil
}
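// Example (editor's sketch, not part of the original file): shows the accepted
// "<namespace>/<network>@<ifname>" item formats handled above.
func exampleParsePodNetworkObjectName() {
	ns, name, ifname, err := parsePodNetworkObjectName("kube-system/macvlan-conf@net1")
	fmt.Printf("%s %s %s %v\n", ns, name, ifname, err) // kube-system macvlan-conf net1 <nil>
	_, bare, _, _ := parsePodNetworkObjectName("macvlan-conf")
	fmt.Println(bare) // macvlan-conf; the namespace is filled in later from the pod's namespace
}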
func parsePodNetworkAnnotation(podNetworks, defaultNamespace string) ([]*types.NetworkSelectionElement, error) {
var networks []*types.NetworkSelectionElement
logging.Debugf("parsePodNetworkAnnotation: %s, %s", podNetworks, defaultNamespace)
if podNetworks == "" {
return nil, logging.Errorf("parsePodNetworkAnnotation: pod annotation not having \"network\" as key, refer Multus README.md for the usage guide")
}
if strings.IndexAny(podNetworks, "[{\"") >= 0 {
if err := json.Unmarshal([]byte(podNetworks), &networks); err != nil {
return nil, logging.Errorf("parsePodNetworkAnnotation: failed to parse pod Network Attachment Selection Annotation JSON format: %v", err)
}
} else {
// Comma-delimited list of network attachment object names
for _, item := range strings.Split(podNetworks, ",") {
// Remove leading and trailing whitespace.
item = strings.TrimSpace(item)
// Parse network name (i.e. <namespace>/<network name>@<ifname>)
netNsName, networkName, netIfName, err := parsePodNetworkObjectName(item)
if err != nil {
return nil, logging.Errorf("parsePodNetworkAnnotation: %v", err)
}
networks = append(networks, &types.NetworkSelectionElement{
Name: networkName,
Namespace: netNsName,
InterfaceRequest: netIfName,
})
}
}
for _, net := range networks {
if net.Namespace == "" {
net.Namespace = defaultNamespace
}
// compatibility pre v3.2, will be removed in v4.0
if net.ObsolatedInterfaceRequest != "" && net.InterfaceRequest == "" {
net.InterfaceRequest = net.ObsolatedInterfaceRequest
}
}
return networks, nil
}
func getCNIConfigFromFile(name string, confdir string) ([]byte, error) {
logging.Debugf("getCNIConfigFromFile: %s, %s", name, confdir)
// In the absence of valid keys in a Spec, the runtime (or
// meta-plugin) should load and execute a CNI .configlist
// or .config (in that order) file on-disk whose JSON
// “name” key matches this Network objects name.
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go#getDefaultCNINetwork
files, err := libcni.ConfFiles(confdir, []string{".conf", ".json", ".conflist"})
switch {
case err != nil:
return nil, logging.Errorf("No networks found in %s", confdir)
case len(files) == 0:
return nil, logging.Errorf("No networks found in %s", confdir)
}
for _, confFile := range files {
var confList *libcni.NetworkConfigList
if strings.HasSuffix(confFile, ".conflist") {
confList, err = libcni.ConfListFromFile(confFile)
if err != nil {
return nil, logging.Errorf("Error loading CNI conflist file %s: %v", confFile, err)
}
if confList.Name == name || name == "" {
return confList.Bytes, nil
}
} else {
conf, err := libcni.ConfFromFile(confFile)
if err != nil {
return nil, logging.Errorf("Error loading CNI config file %s: %v", confFile, err)
}
if conf.Network.Name == name || name == "" {
// Ensure the config has a "type" so we know what plugin to run.
// Also catches the case where somebody put a conflist into a conf file.
if conf.Network.Type == "" {
return nil, logging.Errorf("Error loading CNI config file %s: no 'type'; perhaps this is a .conflist?", confFile)
}
return conf.Bytes, nil
}
}
}
return nil, logging.Errorf("no network available in the name %s in cni dir %s", name, confdir)
}
// getCNIConfigFromSpec reads a CNI JSON configuration from the NetworkAttachmentDefinition
// object's Spec.Config field and fills in any missing details like the network name
func getCNIConfigFromSpec(configData, netName string) ([]byte, error) {
var rawConfig map[string]interface{}
var err error
logging.Debugf("getCNIConfigFromSpec: %s, %s", configData, netName)
configBytes := []byte(configData)
err = json.Unmarshal(configBytes, &rawConfig)
if err != nil {
return nil, logging.Errorf("getCNIConfigFromSpec: failed to unmarshal Spec.Config: %v", err)
}
// Inject network name if missing from Config for the thick plugin case
if n, ok := rawConfig["name"]; !ok || n == "" {
rawConfig["name"] = netName
configBytes, err = json.Marshal(rawConfig)
if err != nil {
return nil, logging.Errorf("getCNIConfigFromSpec: failed to re-marshal Spec.Config: %v", err)
}
}
return configBytes, nil
}
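// Example (editor's sketch, not part of the original file): a Spec.Config that
// omits "name" gets the NetworkAttachmentDefinition's name injected before the
// delegate is loaded.
func exampleGetCNIConfigFromSpec() {
	out, err := getCNIConfigFromSpec(`{"cniVersion":"0.3.1","type":"macvlan","master":"eth0"}`, "macvlan-conf")
	fmt.Println(string(out), err)
	// {"cniVersion":"0.3.1","master":"eth0","name":"macvlan-conf","type":"macvlan"} <nil>
}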
func cniConfigFromNetworkResource(customResource *types.NetworkAttachmentDefinition, confdir string) ([]byte, error) {
var config []byte
var err error
logging.Debugf("cniConfigFromNetworkResource: %v, %s", customResource, confdir)
emptySpec := types.NetworkAttachmentDefinitionSpec{}
if customResource.Spec == emptySpec {
// Network Spec empty; generate delegate from CNI JSON config
// from the configuration directory that has the same network
// name as the custom resource
config, err = getCNIConfigFromFile(customResource.Metadata.Name, confdir)
if err != nil {
return nil, logging.Errorf("cniConfigFromNetworkResource: err in getCNIConfigFromFile: %v", err)
}
} else {
// Config contains a standard JSON-encoded CNI configuration
// or configuration list which defines the plugin chain to
// execute.
config, err = getCNIConfigFromSpec(customResource.Spec.Config, customResource.Metadata.Name)
if err != nil {
return nil, logging.Errorf("cniConfigFromNetworkResource: err in getCNIConfigFromSpec: %v", err)
}
}
return config, nil
}
func getKubernetesDelegate(client KubeClient, net *types.NetworkSelectionElement, confdir string, pod *v1.Pod, resourceMap map[string]*types.ResourceInfo) (*types.DelegateNetConf, map[string]*types.ResourceInfo, error) {
logging.Debugf("getKubernetesDelegate: %v, %v, %s", client, net, confdir)
rawPath := fmt.Sprintf("/apis/k8s.cni.cncf.io/v1/namespaces/%s/network-attachment-definitions/%s", net.Namespace, net.Name)
netData, err := client.GetRawWithPath(rawPath)
if err != nil {
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: failed to get network resource, refer Multus README.md for the usage guide: %v", err)
}
customResource := &types.NetworkAttachmentDefinition{}
if err := json.Unmarshal(netData, customResource); err != nil {
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: failed to get the netplugin data: %v", err)
}
// Get resourceName annotation from NetworkAttachmentDefinition
deviceID := ""
resourceName, ok := customResource.Metadata.Annotations[resourceNameAnnot]
if ok && pod.Name != "" && pod.Namespace != "" {
// ResourceName annotation is found; try to get device info from resourceMap
logging.Debugf("getKubernetesDelegate: found resourceName annotation : %s", resourceName)
if resourceMap == nil {
ck, err := kubeletclient.GetResourceClient()
if err != nil {
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: failed to get a ResourceClient instance: %v", err)
}
resourceMap, err = ck.GetPodResourceMap(pod)
if err != nil {
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: failed to get resourceMap from ResourceClient: %v", err)
}
logging.Debugf("getKubernetesDelegate(): resourceMap instance: %+v", resourceMap)
}
entry, ok := resourceMap[resourceName]
if ok {
if idCount := len(entry.DeviceIDs); idCount > 0 && idCount > entry.Index {
deviceID = entry.DeviceIDs[entry.Index]
logging.Debugf("getKubernetesDelegate: podName: %s deviceID: %s", pod.Name, deviceID)
entry.Index++ // increment Index for next delegate
}
}
}
configBytes, err := cniConfigFromNetworkResource(customResource, confdir)
if err != nil {
return nil, resourceMap, err
}
delegate, err := types.LoadDelegateNetConf(configBytes, net, deviceID)
if err != nil {
return nil, resourceMap, err
}
return delegate, resourceMap, nil
}
// KubeClient is abstraction layer for k8s client (used testing package)
type KubeClient interface {
GetRawWithPath(path string) ([]byte, error)
GetPod(namespace, name string) (*v1.Pod, error)
UpdatePodStatus(pod *v1.Pod) (*v1.Pod, error)
}
// GetK8sArgs gets k8s related args from CNI args
func GetK8sArgs(args *skel.CmdArgs) (*types.K8sArgs, error) {
k8sArgs := &types.K8sArgs{}
logging.Debugf("GetK8sArgs: %v", args)
err := cnitypes.LoadArgs(args.Args, k8sArgs)
if err != nil {
return nil, err
}
return k8sArgs, nil
}
// TryLoadPodDelegates attempts to load Kubernetes-defined delegates and add them to the Multus config.
// Returns the number of Kubernetes-defined delegates added or an error.
func TryLoadPodDelegates(k8sArgs *types.K8sArgs, conf *types.NetConf, kubeClient KubeClient) (int, *ClientInfo, error) {
var err error
clientInfo := &ClientInfo{}
logging.Debugf("TryLoadPodDelegates: %v, %v, %v", k8sArgs, conf, kubeClient)
kubeClient, err = GetK8sClient(conf.Kubeconfig, kubeClient)
if err != nil {
return 0, nil, err
}
if kubeClient == nil {
if len(conf.Delegates) == 0 {
// No available kube client and no delegates, we can't do anything
return 0, nil, logging.Errorf("must have either Kubernetes config or delegates, refer Multus README.md for the usage guide")
}
return 0, nil, nil
}
setKubeClientInfo(clientInfo, kubeClient, k8sArgs)
// Get the pod info. If cannot get it, we use cached delegates
pod, err := kubeClient.GetPod(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
if err != nil {
logging.Debugf("tryLoadK8sDelegates: Err in loading K8s cluster default network from pod annotation: %v, use cached delegates", err)
return 0, nil, nil
}
delegate, err := tryLoadK8sPodDefaultNetwork(kubeClient, pod, conf)
if err != nil {
return 0, nil, logging.Errorf("tryLoadK8sDelegates: Err in loading K8s cluster default network from pod annotation: %v", err)
}
if delegate != nil {
logging.Debugf("tryLoadK8sDelegates: Overwrite the cluster default network with %v from pod annotations", delegate)
conf.Delegates[0] = delegate
}
networks, err := GetPodNetwork(pod)
if networks != nil {
delegates, err := GetNetworkDelegates(kubeClient, pod, networks, conf.ConfDir, conf.NamespaceIsolation)
if err != nil {
if _, ok := err.(*NoK8sNetworkError); ok {
return 0, clientInfo, nil
}
return 0, nil, logging.Errorf("Multus: Err in getting k8s network from pod: %v", err)
}
if err = conf.AddDelegates(delegates); err != nil {
return 0, nil, err
}
return len(delegates), clientInfo, nil
}
return 0, clientInfo, nil
}
// GetK8sClient gets client info from kubeconfig
func GetK8sClient(kubeconfig string, kubeClient KubeClient) (KubeClient, error) {
logging.Debugf("GetK8sClient: %s, %v", kubeconfig, kubeClient)
// If we get a valid kubeClient (eg from testcases) just return that
// one.
if kubeClient != nil {
return kubeClient, nil
}
var err error
var config *rest.Config
// Otherwise try to create a kubeClient from a given kubeConfig
if kubeconfig != "" {
// uses the current context in kubeconfig
config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, logging.Errorf("GetK8sClient: failed to get context for the kubeconfig %v, refer Multus README.md for the usage guide: %v", kubeconfig, err)
}
} else if os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "" {
// Try in-cluster config where multus might be running in a kubernetes pod
config, err = rest.InClusterConfig()
if err != nil {
return nil, logging.Errorf("createK8sClient: failed to get context for in-cluster kube config, refer Multus README.md for the usage guide: %v", err)
}
} else {
// No kubernetes config; assume we shouldn't talk to Kube at all
return nil, nil
}
// Specify that we use gRPC
config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
config.ContentType = "application/vnd.kubernetes.protobuf"
// creates the clientset
client, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return &defaultKubeClient{client: client}, nil
}
// GetPodNetwork gets net-attach-def annotation from pod
func GetPodNetwork(pod *v1.Pod) ([]*types.NetworkSelectionElement, error) {
logging.Debugf("GetPodNetwork: %v", pod)
netAnnot := pod.Annotations[networkAttachmentAnnot]
defaultNamespace := pod.ObjectMeta.Namespace
if len(netAnnot) == 0 {
return nil, &NoK8sNetworkError{"no kubernetes network found"}
}
networks, err := parsePodNetworkAnnotation(netAnnot, defaultNamespace)
if err != nil {
return nil, err
}
return networks, nil
}
// GetNetworkDelegates returns delegatenetconf from net-attach-def annotation in pod
func GetNetworkDelegates(k8sclient KubeClient, pod *v1.Pod, networks []*types.NetworkSelectionElement, confdir string, confnamespaceIsolation bool) ([]*types.DelegateNetConf, error) {
logging.Debugf("GetNetworkDelegates: %v, %v, %v, %v, %v", k8sclient, pod, networks, confdir, confnamespaceIsolation)
// resourceMap holds Pod device allocation information; only initialized if the CRD contains the 'resourceName' annotation.
// This will only be initialized once and all delegate objects can reference this to look up device info.
var resourceMap map[string]*types.ResourceInfo
// Read all network objects referenced by 'networks'
var delegates []*types.DelegateNetConf
defaultNamespace := pod.ObjectMeta.Namespace
for _, net := range networks {
// The pods namespace (stored as defaultNamespace, does not equal the annotation's target namespace in net.Namespace)
// In the case that this is a mismatch when namespaceisolation is enabled, this should be an error.
if confnamespaceIsolation {
if defaultNamespace != net.Namespace {
return nil, logging.Errorf("GetPodNetwork: namespace isolation violation: podnamespace: %v / target namespace: %v", defaultNamespace, net.Namespace)
}
}
delegate, updatedResourceMap, err := getKubernetesDelegate(k8sclient, net, confdir, pod, resourceMap)
if err != nil {
return nil, logging.Errorf("GetPodNetwork: failed getting the delegate: %v", err)
}
delegates = append(delegates, delegate)
resourceMap = updatedResourceMap
}
return delegates, nil
}
func getDefaultNetDelegateCRD(client KubeClient, net, confdir, namespace string) (*types.DelegateNetConf, error) {
logging.Debugf("getDefaultNetDelegateCRD: %v, %v, %s, %s", client, net, confdir, namespace)
rawPath := fmt.Sprintf("/apis/k8s.cni.cncf.io/v1/namespaces/%s/network-attachment-definitions/%s", namespace, net)
netData, err := client.GetRawWithPath(rawPath)
if err != nil {
return nil, logging.Errorf("getDefaultNetDelegateCRD: failed to get network resource, refer Multus README.md for the usage guide: %v", err)
}
customResource := &types.NetworkAttachmentDefinition{}
if err := json.Unmarshal(netData, customResource); err != nil {
return nil, logging.Errorf("getDefaultNetDelegateCRD: failed to get the netplugin data: %v", err)
}
configBytes, err := cniConfigFromNetworkResource(customResource, confdir)
if err != nil {
return nil, err
}
delegate, err := types.LoadDelegateNetConf(configBytes, nil, "")
if err != nil {
return nil, err
}
return delegate, nil
}
func getNetDelegate(client KubeClient, netname, confdir, namespace string) (*types.DelegateNetConf, error) {
logging.Debugf("getNetDelegate: %v, %v, %v, %s", client, netname, confdir, namespace)
// option1) search CRD object for the network
delegate, err := getDefaultNetDelegateCRD(client, netname, confdir, namespace)
if err == nil {
return delegate, nil
}
// option2) search CNI json config file
var configBytes []byte
configBytes, err = getCNIConfigFromFile(netname, confdir)
if err == nil {
delegate, err := types.LoadDelegateNetConf(configBytes, nil, "")
if err != nil {
return nil, err
}
return delegate, nil
}
// option3) search directory
fInfo, err := os.Stat(netname)
if err == nil {
if fInfo.IsDir() {
files, err := libcni.ConfFiles(netname, []string{".conf", ".conflist"})
if err != nil {
return nil, err
}
if len(files) > 0 {
var configBytes []byte
configBytes, err = getCNIConfigFromFile("", netname)
if err == nil {
delegate, err := types.LoadDelegateNetConf(configBytes, nil, "")
if err != nil {
return nil, err
}
return delegate, nil
}
return nil, err
}
}
}
return nil, logging.Errorf("getNetDelegate: cannot find network: %v", netname)
}
// GetDefaultNetworks parses 'defaultNetwork' config, gets network json and put it into netconf.Delegates.
func GetDefaultNetworks(k8sArgs *types.K8sArgs, conf *types.NetConf, kubeClient KubeClient) error {
logging.Debugf("GetDefaultNetworks: %v, %v, %v", k8sArgs, conf, kubeClient)
var delegates []*types.DelegateNetConf
kubeClient, err := GetK8sClient(conf.Kubeconfig, kubeClient)
if err != nil {
return err
}
if kubeClient == nil {
if len(conf.Delegates) == 0 {
// No available kube client and no delegates, we can't do anything
return logging.Errorf("must have either Kubernetes config or delegates, refer Multus README.md for the usage guide")
}
return nil
}
delegate, err := getNetDelegate(kubeClient, conf.ClusterNetwork, conf.ConfDir, conf.MultusNamespace)
if err != nil {
return err
}
delegate.MasterPlugin = true
delegates = append(delegates, delegate)
// Pod in kube-system namespace does not have default network for now.
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAMESPACE), conf.SystemNamespaces) {
for _, netname := range conf.DefaultNetworks {
delegate, err := getNetDelegate(kubeClient, netname, conf.ConfDir, conf.MultusNamespace)
if err != nil {
return err
}
delegates = append(delegates, delegate)
}
}
if err = conf.AddDelegates(delegates); err != nil {
return err
}
return nil
}
// tryLoadK8sPodDefaultNetwork get pod default network from annotations
func tryLoadK8sPodDefaultNetwork(kubeClient KubeClient, pod *v1.Pod, conf *types.NetConf) (*types.DelegateNetConf, error) {
var netAnnot string
logging.Debugf("tryLoadK8sPodDefaultNetwork: %v, %v, %v", kubeClient, pod, conf)
netAnnot, ok := pod.Annotations[defaultNetAnnot]
if !ok {
logging.Debugf("tryLoadK8sPodDefaultNetwork: Pod default network annotation is not defined")
return nil, nil
}
// The CRD object of default network should only be defined in multusNamespace
networks, err := parsePodNetworkAnnotation(netAnnot, conf.MultusNamespace)
if err != nil {
return nil, logging.Errorf("tryLoadK8sPodDefaultNetwork: failed to parse CRD object: %v", err)
}
if len(networks) > 1 {
return nil, logging.Errorf("tryLoadK8sPodDefaultNetwork: more than one default network is specified: %s", netAnnot)
}
delegate, _, err := getKubernetesDelegate(kubeClient, networks[0], conf.ConfDir, pod, nil)
if err != nil {
return nil, logging.Errorf("tryLoadK8sPodDefaultNetwork: failed getting the delegate: %v", err)
}
delegate.MasterPlugin = true
return delegate, nil
}


@@ -1,558 +0,0 @@
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This is a "Multi-plugin".The delegate concept refered from CNI project
// It reads other plugin netconf, and then invoke them, e.g.
// flannel or sriov plugin.
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"strings"
"time"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/invoke"
"github.com/containernetworking/cni/pkg/skel"
cnitypes "github.com/containernetworking/cni/pkg/types"
cniversion "github.com/containernetworking/cni/pkg/version"
"github.com/containernetworking/plugins/pkg/ns"
k8s "github.com/intel/multus-cni/k8sclient"
"github.com/intel/multus-cni/logging"
"github.com/intel/multus-cni/types"
"github.com/vishvananda/netlink"
"k8s.io/apimachinery/pkg/util/wait"
)
var version = "master@git"
var commit = "unknown commit"
var date = "unknown date"
var defaultReadinessBackoff = wait.Backoff{
Steps: 4,
Duration: 250 * time.Millisecond,
Factor: 4.0,
Jitter: 0.1,
}
func printVersionString() string {
return fmt.Sprintf("multus-cni version:%s, commit:%s, date:%s",
version, commit, date)
}
func saveScratchNetConf(containerID, dataDir string, netconf []byte) error {
logging.Debugf("saveScratchNetConf: %s, %s, %s", containerID, dataDir, string(netconf))
if err := os.MkdirAll(dataDir, 0700); err != nil {
return logging.Errorf("failed to create the multus data directory(%q): %v", dataDir, err)
}
path := filepath.Join(dataDir, containerID)
err := ioutil.WriteFile(path, netconf, 0600)
if err != nil {
return logging.Errorf("failed to write container data in the path(%q): %v", path, err)
}
return err
}
func consumeScratchNetConf(containerID, dataDir string) ([]byte, string, error) {
logging.Debugf("consumeScratchNetConf: %s, %s", containerID, dataDir)
path := filepath.Join(dataDir, containerID)
b, err := ioutil.ReadFile(path)
return b, path, err
}
func getIfname(delegate *types.DelegateNetConf, argif string, idx int) string {
logging.Debugf("getIfname: %v, %s, %d", delegate, argif, idx)
if delegate.IfnameRequest != "" {
return delegate.IfnameRequest
}
if delegate.MasterPlugin {
// master plugin always uses the CNI-provided interface name
return argif
}
// Otherwise construct a unique interface name from the delegate's
// position in the delegate list
return fmt.Sprintf("net%d", idx)
}
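// Example (editor's sketch, not part of the original file): the master plugin
// keeps the CNI-provided interface name and the remaining delegates get
// positional names (net1, net2, ...) unless they request one explicitly.
func exampleGetIfname(delegates []*types.DelegateNetConf) {
	for idx, d := range delegates {
		fmt.Println(getIfname(d, "eth0", idx))
	}
}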
func saveDelegates(containerID, dataDir string, delegates []*types.DelegateNetConf) error {
logging.Debugf("saveDelegates: %s, %s, %v", containerID, dataDir, delegates)
delegatesBytes, err := json.Marshal(delegates)
if err != nil {
return logging.Errorf("error serializing delegate netconf: %v", err)
}
if err = saveScratchNetConf(containerID, dataDir, delegatesBytes); err != nil {
return logging.Errorf("error in saving the delegates : %v", err)
}
return err
}
func deleteDelegates(containerID, dataDir string) error {
logging.Debugf("deleteDelegates: %s, %s", containerID, dataDir)
path := filepath.Join(dataDir, containerID)
if err := os.Remove(path); err != nil {
return logging.Errorf("error in deleting the delegates : %v", err)
}
return nil
}
func validateIfName(nsname string, ifname string) error {
logging.Debugf("validateIfName: %s, %s", nsname, ifname)
podNs, err := ns.GetNS(nsname)
if err != nil {
return logging.Errorf("no netns: %v", err)
}
err = podNs.Do(func(_ ns.NetNS) error {
_, err := netlink.LinkByName(ifname)
if err != nil {
if err.Error() == "Link not found" {
return nil
}
return err
}
return logging.Errorf("ifname %s is already exist", ifname)
})
return err
}
func conflistAdd(rt *libcni.RuntimeConf, rawnetconflist []byte, binDir string, exec invoke.Exec) (cnitypes.Result, error) {
logging.Debugf("conflistAdd: %v, %s, %s", rt, string(rawnetconflist), binDir)
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append(binDirs, binDir)
cniNet := libcni.NewCNIConfig(binDirs, exec)
confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
return nil, logging.Errorf("error in converting the raw bytes to conflist: %v", err)
}
result, err := cniNet.AddNetworkList(context.Background(), confList, rt)
if err != nil {
return nil, logging.Errorf("error in getting result from AddNetworkList: %v", err)
}
return result, nil
}
func conflistDel(rt *libcni.RuntimeConf, rawnetconflist []byte, binDir string, exec invoke.Exec) error {
logging.Debugf("conflistDel: %v, %s, %s", rt, string(rawnetconflist), binDir)
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append(binDirs, binDir)
cniNet := libcni.NewCNIConfig(binDirs, exec)
confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
return logging.Errorf("error in converting the raw bytes to conflist: %v", err)
}
err = cniNet.DelNetworkList(context.Background(), confList, rt)
if err != nil {
return logging.Errorf("error in getting result from DelNetworkList: %v", err)
}
return err
}
func delegateAdd(exec invoke.Exec, ifName string, delegate *types.DelegateNetConf, rt *libcni.RuntimeConf, binDir string, cniArgs string) (cnitypes.Result, error) {
logging.Debugf("delegateAdd: %v, %s, %v, %v, %s", exec, ifName, delegate, rt, binDir)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return nil, logging.Errorf("Multus: error in setting CNI_IFNAME")
}
if err := validateIfName(os.Getenv("CNI_NETNS"), ifName); err != nil {
return nil, logging.Errorf("cannot set %q ifname to %q: %v", delegate.Conf.Type, ifName, err)
}
if delegate.MacRequest != "" || delegate.IPRequest != "" {
if cniArgs != "" {
cniArgs = fmt.Sprintf("%s;IgnoreUnknown=true", cniArgs)
} else {
cniArgs = "IgnoreUnknown=true"
}
if delegate.MacRequest != "" {
// validate Mac address
_, err := net.ParseMAC(delegate.MacRequest)
if err != nil {
return nil, logging.Errorf("failed to parse mac address %q", delegate.MacRequest)
}
cniArgs = fmt.Sprintf("%s;MAC=%s", cniArgs, delegate.MacRequest)
logging.Debugf("Set MAC address %q to %q", delegate.MacRequest, ifName)
}
if delegate.IPRequest != "" {
// validate IP address
if strings.Contains(delegate.IPRequest, "/") {
_, _, err := net.ParseCIDR(delegate.IPRequest)
if err != nil {
return nil, logging.Errorf("failed to parse CIDR %q", delegate.MacRequest)
}
} else if net.ParseIP(delegate.IPRequest) == nil {
return nil, logging.Errorf("failed to parse IP address %q", delegate.IPRequest)
}
cniArgs = fmt.Sprintf("%s;IP=%s", cniArgs, delegate.IPRequest)
logging.Debugf("Set IP address %q to %q", delegate.IPRequest, ifName)
}
if os.Setenv("CNI_ARGS", cniArgs) != nil {
return nil, logging.Errorf("cannot set %q mac to %q and ip to %q", delegate.Conf.Type, delegate.MacRequest, delegate.IPRequest)
}
}
var result cnitypes.Result
var err error
if delegate.ConfListPlugin {
result, err = conflistAdd(rt, delegate.Bytes, binDir, exec)
if err != nil {
return nil, logging.Errorf("Multus: error in invoke Conflist add - %q: %v", delegate.ConfList.Name, err)
}
} else {
origpath := os.Getenv("CNI_PATH")
os.Setenv("CNI_PATH", origpath+":"+binDir)
result, err = invoke.DelegateAdd(context.Background(), delegate.Conf.Type, delegate.Bytes, exec)
os.Setenv("CNI_PATH", origpath)
if err != nil {
return nil, logging.Errorf("Multus: error in invoke Delegate add - %q: %v", delegate.Conf.Type, err)
}
}
if logging.GetLoggingLevel() >= logging.VerboseLevel {
data, _ := json.Marshal(result)
var confName string
if delegate.ConfListPlugin {
confName = delegate.ConfList.Name
} else {
confName = delegate.Conf.Name
}
logging.Verbosef("Add: %s:%s:%s:%s %s", rt.Args[1][1], rt.Args[2][1], confName, rt.IfName, string(data))
}
return result, nil
}
func delegateDel(exec invoke.Exec, ifName string, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, binDir string) error {
logging.Debugf("delegateDel: %v, %s, %v, %v, %s", exec, ifName, delegateConf, rt, binDir)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return logging.Errorf("Multus: error in setting CNI_IFNAME")
}
if logging.GetLoggingLevel() >= logging.VerboseLevel {
var confName string
if delegateConf.ConfListPlugin {
confName = delegateConf.ConfList.Name
} else {
confName = delegateConf.Conf.Name
}
logging.Verbosef("Del: %s:%s:%s:%s %s", rt.Args[1][1], rt.Args[2][1], confName, rt.IfName, string(delegateConf.Bytes))
}
var err error
if delegateConf.ConfListPlugin {
err = conflistDel(rt, delegateConf.Bytes, binDir, exec)
if err != nil {
return logging.Errorf("Multus: error in invoke Conflist Del - %q: %v", delegateConf.ConfList.Name, err)
}
} else {
origpath := os.Getenv("CNI_PATH")
os.Setenv("CNI_PATH", origpath+":"+binDir)
if err = invoke.DelegateDel(context.Background(), delegateConf.Conf.Type, delegateConf.Bytes, exec); err != nil {
os.Setenv("CNI_PATH", origpath)
return logging.Errorf("Multus: error in invoke Delegate del - %q: %v", delegateConf.Conf.Type, err)
}
os.Setenv("CNI_PATH", origpath)
}
return err
}
func delPlugins(exec invoke.Exec, argIfname string, delegates []*types.DelegateNetConf, lastIdx int, rt *libcni.RuntimeConf, binDir string) error {
logging.Debugf("delPlugins: %v, %s, %v, %d, %v, %s", exec, argIfname, delegates, lastIdx, rt, binDir)
if os.Setenv("CNI_COMMAND", "DEL") != nil {
return logging.Errorf("Multus: error in setting CNI_COMMAND to DEL")
}
var errorstrings []string
for idx := lastIdx; idx >= 0; idx-- {
ifName := getIfname(delegates[idx], argIfname, idx)
rt.IfName = ifName
// Attempt to delete all but do not error out, instead, collect all errors.
if err := delegateDel(exec, ifName, delegates[idx], rt, binDir); err != nil {
errorstrings = append(errorstrings, err.Error())
}
}
// Check if we had any errors, and send them all back.
if len(errorstrings) > 0 {
return fmt.Errorf(strings.Join(errorstrings, " / "))
}
return nil
}
func cmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) (cnitypes.Result, error) {
n, err := types.LoadNetConf(args.StdinData)
logging.Debugf("cmdAdd: %v, %v, %v", args, exec, kubeClient)
if err != nil {
return nil, logging.Errorf("err in loading netconf: %v", err)
}
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return nil, logging.Errorf("Multus: Err in getting k8s args: %v", err)
}
wait.ExponentialBackoff(defaultReadinessBackoff, func() (bool, error) {
_, err := os.Stat(n.ReadinessIndicatorFile)
switch {
case err == nil:
return true, nil
default:
return false, nil
}
})
if n.ClusterNetwork != "" {
err = k8s.GetDefaultNetworks(k8sArgs, n, kubeClient)
if err != nil {
return nil, logging.Errorf("Multus: Failed to get clusterNetwork/defaultNetworks: %v", err)
}
// First delegate is always the master plugin
n.Delegates[0].MasterPlugin = true
}
_, kc, err := k8s.TryLoadPodDelegates(k8sArgs, n, kubeClient)
if err != nil {
return nil, logging.Errorf("Multus: Err in loading K8s Delegates k8s args: %v", err)
}
// cache the multus config
if err := saveDelegates(args.ContainerID, n.CNIDir, n.Delegates); err != nil {
return nil, logging.Errorf("Multus: Err in saving the delegates: %v", err)
}
var result, tmpResult cnitypes.Result
var netStatus []*types.NetworkStatus
cniArgs := os.Getenv("CNI_ARGS")
for idx, delegate := range n.Delegates {
ifName := getIfname(delegate, args.IfName, idx)
rt := types.CreateCNIRuntimeConf(args, k8sArgs, ifName, n.RuntimeConfig)
tmpResult, err = delegateAdd(exec, ifName, delegate, rt, n.BinDir, cniArgs)
if err != nil {
// If the add failed, tear down all networks we already added
netName := delegate.Conf.Name
if netName == "" {
netName = delegate.ConfList.Name
}
// Ignore errors; DEL must be idempotent anyway
_ = delPlugins(exec, args.IfName, n.Delegates, idx, rt, n.BinDir)
return nil, logging.Errorf("Multus: Err adding pod to network %q: %v", netName, err)
}
// Master plugin result is always used if present
if delegate.MasterPlugin || result == nil {
result = tmpResult
}
//create the network status, only in case Multus as kubeconfig
if n.Kubeconfig != "" && kc != nil {
if !types.CheckSystemNamespaces(kc.Podnamespace, n.SystemNamespaces) {
delegateNetStatus, err := types.LoadNetworkStatus(tmpResult, delegate.Conf.Name, delegate.MasterPlugin)
if err != nil {
return nil, logging.Errorf("Multus: Err in setting network status: %v", err)
}
netStatus = append(netStatus, delegateNetStatus)
}
}
}
//set the network status annotation in apiserver, only in case Multus as kubeconfig
if n.Kubeconfig != "" && kc != nil {
if !types.CheckSystemNamespaces(kc.Podnamespace, n.SystemNamespaces) {
err = k8s.SetNetworkStatus(kubeClient, k8sArgs, netStatus, n)
if err != nil {
return nil, logging.Errorf("Multus: Err set the networks status: %v", err)
}
}
}
return result, nil
}
func cmdGet(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) (cnitypes.Result, error) {
logging.Debugf("cmdGet: %v, %v, %v", args, exec, kubeClient)
in, err := types.LoadNetConf(args.StdinData)
if err != nil {
return nil, err
}
// FIXME: call all delegates
return in.PrevResult, nil
}
func cmdDel(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) error {
logging.Debugf("cmdDel: %v, %v, %v", args, exec, kubeClient)
in, err := types.LoadNetConf(args.StdinData)
if err != nil {
return err
}
netnsfound := true
netns, err := ns.GetNS(args.Netns)
if err != nil {
// If the netns is no longer present (e.g. the Cloud Orchestration Engine already removed it, or DEL is called multiple times),
// don't return an error: the device is already removed.
// https://github.com/kubernetes/kubernetes/issues/43014#issuecomment-287164444
_, ok := err.(ns.NSPathNotExistErr)
if ok {
netnsfound = false
logging.Debugf("cmdDel: WARNING netns may not exist, netns: %s, err: %s", args.Netns, err)
} else {
return fmt.Errorf("failed to open netns %q: %v", netns, err)
}
}
if netns != nil {
defer netns.Close()
}
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return logging.Errorf("Multus: Err in getting k8s args: %v", err)
}
// Read the cache to get delegates json for the pod
netconfBytes, path, err := consumeScratchNetConf(args.ContainerID, in.CNIDir)
if err != nil {
// Fetch the delegates again if the cache does not exist
if os.IsNotExist(err) {
if in.ClusterNetwork != "" {
err = k8s.GetDefaultNetworks(k8sArgs, in, kubeClient)
if err != nil {
return logging.Errorf("Multus: Failed to get clusterNetwork/defaultNetworks: %v", err)
}
// First delegate is always the master plugin
in.Delegates[0].MasterPlugin = true
}
// Get pod annotation and so on
_, _, err := k8s.TryLoadPodDelegates(k8sArgs, in, kubeClient)
if err != nil {
if len(in.Delegates) == 0 {
// No delegate available so send error
return logging.Errorf("Multus: failed to get delegates: %v", err)
}
// Get clusterNetwork before, so continue to delete
logging.Errorf("Multus: failed to get delegates: %v, but continue to delete clusterNetwork", err)
}
} else {
return logging.Errorf("Multus: Err in reading the delegates: %v", err)
}
} else {
defer os.Remove(path)
if err := json.Unmarshal(netconfBytes, &in.Delegates); err != nil {
return logging.Errorf("Multus: failed to load netconf: %v", err)
}
// check plugins field and enable ConfListPlugin if there is
for _, v := range in.Delegates {
if len(v.ConfList.Plugins) != 0 {
v.ConfListPlugin = true
}
}
// First delegate is always the master plugin
in.Delegates[0].MasterPlugin = true
}
// Set CNIVersion in the delegate CNI config if it has none and the Multus conf has a CNIVersion.
for _, v := range in.Delegates {
if v.ConfListPlugin == true && v.ConfList.CNIVersion == "" && in.CNIVersion != "" {
v.ConfList.CNIVersion = in.CNIVersion
v.Bytes, err = json.Marshal(v.ConfList)
}
}
// unset the network status annotation in apiserver, only in case Multus as kubeconfig
if in.Kubeconfig != "" {
if netnsfound {
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAMESPACE), in.SystemNamespaces) {
err := k8s.SetNetworkStatus(kubeClient, k8sArgs, nil, in)
if err != nil {
// error happen but continue to delete
logging.Errorf("Multus: Err unset the networks status: %v", err)
}
}
} else {
logging.Debugf("WARNING: Unset SetNetworkStatus skipped due to netns not found.")
}
}
rt := types.CreateCNIRuntimeConf(args, k8sArgs, "", in.RuntimeConfig)
return delPlugins(exec, args.IfName, in.Delegates, len(in.Delegates)-1, rt, in.BinDir)
}
func main() {
// Init command line flags to clear vendored packages' one, especially in init()
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
// add version flag
versionOpt := false
flag.BoolVar(&versionOpt, "version", false, "Show application version")
flag.BoolVar(&versionOpt, "v", false, "Show application version")
flag.Parse()
if versionOpt == true {
fmt.Printf("%s\n", printVersionString())
return
}
skel.PluginMain(
func(args *skel.CmdArgs) error {
result, err := cmdAdd(args, nil, nil)
if err != nil {
return err
}
return result.Print()
},
func(args *skel.CmdArgs) error {
result, err := cmdGet(args, nil, nil)
if err != nil {
return err
}
return result.Print()
},
func(args *skel.CmdArgs) error { return cmdDel(args, nil, nil) },
cniversion.All, "meta-plugin that delegates to other CNI plugins")
}


@@ -19,8 +19,8 @@ import (
"encoding/json"
"io/ioutil"
"github.com/intel/multus-cni/logging"
"github.com/intel/multus-cni/types"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
v1 "k8s.io/api/core/v1"
)
@@ -64,7 +64,7 @@ func getCheckpoint(filePath string) (types.ResourceClient, error) {
if err != nil {
return nil, err
}
logging.Debugf("getCheckpoint(): created checkpoint instance with file: %s", filePath)
logging.Debugf("getCheckpoint: created checkpoint instance with file: %s", filePath)
return cp, nil
}
@@ -74,15 +74,15 @@ func (cp *checkpoint) getPodEntries() error {
cpd := &checkpointFileData{}
rawBytes, err := ioutil.ReadFile(cp.fileName)
if err != nil {
return logging.Errorf("getPodEntries(): error reading file %s\n%v\n", checkPointfile, err)
return logging.Errorf("getPodEntries: error reading file %s\n%v\n", checkPointfile, err)
}
if err = json.Unmarshal(rawBytes, cpd); err != nil {
return logging.Errorf("getPodEntries(): error unmarshalling raw bytes %v", err)
return logging.Errorf("getPodEntries: error unmarshalling raw bytes %v", err)
}
cp.podEntires = cpd.Data.PodDeviceEntries
logging.Debugf("getPodEntries(): podEntires %+v", cp.podEntires)
logging.Debugf("getPodEntries: podEntires %+v", cp.podEntires)
return nil
}
@@ -92,7 +92,7 @@ func (cp *checkpoint) GetPodResourceMap(pod *v1.Pod) (map[string]*types.Resource
resourceMap := make(map[string]*types.ResourceInfo)
if podID == "" {
return nil, logging.Errorf("GetPodResourceMap(): invalid Pod cannot be empty")
return nil, logging.Errorf("GetPodResourceMap: invalid Pod cannot be empty")
}
for _, pod := range cp.podEntires {
if pod.PodUID == podID {


@@ -10,7 +10,7 @@ import (
"io/ioutil"
"testing"
"github.com/intel/multus-cni/types"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sTypes "k8s.io/apimachinery/pkg/types"

pkg/k8sclient/k8sclient.go (new file, 613 lines)

@@ -0,0 +1,613 @@
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8sclient
import (
"context"
"encoding/json"
"fmt"
"net"
"os"
"regexp"
"strings"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/skel"
cnitypes "github.com/containernetworking/cni/pkg/types"
nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
netclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1"
netutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/kubeletclient"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
)
const (
resourceNameAnnot = "k8s.v1.cni.cncf.io/resourceName"
defaultNetAnnot = "v1.multus-cni.io/default-network"
networkAttachmentAnnot = "k8s.v1.cni.cncf.io/networks"
)
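The annotation keys above drive the rest of this file: resourceNameAnnot is read from the net-attach-def object, while defaultNetAnnot and networkAttachmentAnnot are read from the pod. As a hedged illustration, a pod carrying the attachment annotation could be constructed like this (all names are placeholders):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Example pod requesting two extra attachments through the
	// k8s.v1.cni.cncf.io/networks annotation parsed below.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "samplepod",
			Namespace: "default",
			Annotations: map[string]string{
				"k8s.v1.cni.cncf.io/networks": "macvlan-conf-1, other-ns/macvlan-conf-2@net2",
			},
		},
	}
	fmt.Println(pod.Annotations["k8s.v1.cni.cncf.io/networks"])
}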
// NoK8sNetworkError indicates that no kubernetes network was found for the pod
type NoK8sNetworkError struct {
message string
}
// ClientInfo contains the Kubernetes client handles used by Multus
type ClientInfo struct {
Client kubernetes.Interface
NetClient netclient.K8sCniCncfIoV1Interface
EventBroadcaster record.EventBroadcaster
EventRecorder record.EventRecorder
}
// AddPod adds pod into kubernetes
func (c *ClientInfo) AddPod(pod *v1.Pod) (*v1.Pod, error) {
return c.Client.CoreV1().Pods(pod.ObjectMeta.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
}
// GetPod gets pod from kubernetes
func (c *ClientInfo) GetPod(namespace, name string) (*v1.Pod, error) {
return c.Client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
// DeletePod deletes a pod from kubernetes
func (c *ClientInfo) DeletePod(namespace, name string) error {
return c.Client.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}
// AddNetAttachDef adds net-attach-def into kubernetes
func (c *ClientInfo) AddNetAttachDef(netattach *nettypes.NetworkAttachmentDefinition) (*nettypes.NetworkAttachmentDefinition, error) {
return c.NetClient.NetworkAttachmentDefinitions(netattach.ObjectMeta.Namespace).Create(context.TODO(), netattach, metav1.CreateOptions{})
}
// Eventf puts event into kubernetes events
func (c *ClientInfo) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
if c != nil && c.EventRecorder != nil {
c.EventRecorder.Eventf(object, eventtype, reason, messageFmt, args...)
}
}
func (e *NoK8sNetworkError) Error() string { return e.message }
// SetNetworkStatus sets network status into Pod annotation
func SetNetworkStatus(client *ClientInfo, k8sArgs *types.K8sArgs, netStatus []nettypes.NetworkStatus, conf *types.NetConf) error {
var err error
logging.Debugf("SetNetworkStatus: %v, %v, %v, %v", client, k8sArgs, netStatus, conf)
client, err = GetK8sClient(conf.Kubeconfig, client)
if err != nil {
return logging.Errorf("SetNetworkStatus: %v", err)
}
if client == nil || client.Client == nil {
if len(conf.Delegates) == 0 {
// No available kube client and no delegates, we can't do anything
return logging.Errorf("SetNetworkStatus: must have either Kubernetes config or delegates")
}
logging.Debugf("SetNetworkStatus: kube client info is not defined, skip network status setup")
return nil
}
podName := string(k8sArgs.K8S_POD_NAME)
podNamespace := string(k8sArgs.K8S_POD_NAMESPACE)
pod, err := client.GetPod(podNamespace, podName)
if err != nil {
return logging.Errorf("SetNetworkStatus: failed to query the pod %v in out of cluster comm: %v", podName, err)
}
if netStatus != nil {
err = netutils.SetNetworkStatus(client.Client, pod, netStatus)
if err != nil {
return logging.Errorf("SetNetworkStatus: failed to update the pod %v in out of cluster comm: %v", podName, err)
}
}
return nil
}
func parsePodNetworkObjectName(podnetwork string) (string, string, string, error) {
var netNsName string
var netIfName string
var networkName string
logging.Debugf("parsePodNetworkObjectName: %s", podnetwork)
slashItems := strings.Split(podnetwork, "/")
if len(slashItems) == 2 {
netNsName = strings.TrimSpace(slashItems[0])
networkName = slashItems[1]
} else if len(slashItems) == 1 {
networkName = slashItems[0]
} else {
return "", "", "", logging.Errorf("parsePodNetworkObjectName: Invalid network object (failed at '/')")
}
atItems := strings.Split(networkName, "@")
networkName = strings.TrimSpace(atItems[0])
if len(atItems) == 2 {
netIfName = strings.TrimSpace(atItems[1])
} else if len(atItems) != 1 {
return "", "", "", logging.Errorf("parsePodNetworkObjectName: Invalid network object (failed at '@')")
}
// Check and see if each item matches the specification for valid attachment name.
// "Valid attachment names must be comprised of units of the DNS-1123 label format"
// [a-z0-9]([-a-z0-9]*[a-z0-9])?
// And we allow at (@), and forward slash (/) (units separated by commas)
// It must start and end alphanumerically.
allItems := []string{netNsName, networkName, netIfName}
for i := range allItems {
matched, _ := regexp.MatchString("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$", allItems[i])
if !matched && len([]rune(allItems[i])) > 0 {
return "", "", "", logging.Errorf(fmt.Sprintf("parsePodNetworkObjectName: Failed to parse: one or more items did not match comma-delimited format (must consist of lower case alphanumeric characters). Must start and end with an alphanumeric character), mismatch @ '%v'", allItems[i]))
}
}
logging.Debugf("parsePodNetworkObjectName: parsed: %s, %s, %s", netNsName, networkName, netIfName)
return netNsName, networkName, netIfName, nil
}
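Each unit of the annotation has the form <namespace>/<network>@<ifname>, where the namespace and interface parts are optional. A simplified standalone sketch of the same split, without the DNS-1123 validation performed above:

package main

import (
	"fmt"
	"strings"
)

// splitNetworkRef mirrors the "<namespace>/<network>@<ifname>" parsing above;
// it is a simplified sketch that skips the validation and error cases.
func splitNetworkRef(ref string) (ns, name, ifname string) {
	if idx := strings.Index(ref, "/"); idx >= 0 {
		ns, ref = strings.TrimSpace(ref[:idx]), ref[idx+1:]
	}
	if idx := strings.Index(ref, "@"); idx >= 0 {
		ref, ifname = ref[:idx], strings.TrimSpace(ref[idx+1:])
	}
	return ns, strings.TrimSpace(ref), ifname
}

func main() {
	ns, name, ifname := splitNetworkRef("kube-system/macvlan-conf@net1")
	fmt.Println(ns, name, ifname) // kube-system macvlan-conf net1
}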
func parsePodNetworkAnnotation(podNetworks, defaultNamespace string) ([]*types.NetworkSelectionElement, error) {
var networks []*types.NetworkSelectionElement
logging.Debugf("parsePodNetworkAnnotation: %s, %s", podNetworks, defaultNamespace)
if podNetworks == "" {
return nil, logging.Errorf("parsePodNetworkAnnotation: pod annotation does not have \"network\" as key")
}
if strings.IndexAny(podNetworks, "[{\"") >= 0 {
if err := json.Unmarshal([]byte(podNetworks), &networks); err != nil {
return nil, logging.Errorf("parsePodNetworkAnnotation: failed to parse pod Network Attachment Selection Annotation JSON format: %v", err)
}
} else {
// Comma-delimited list of network attachment object names
for _, item := range strings.Split(podNetworks, ",") {
// Remove leading and trailing whitespace.
item = strings.TrimSpace(item)
// Parse network name (i.e. <namespace>/<network name>@<ifname>)
netNsName, networkName, netIfName, err := parsePodNetworkObjectName(item)
if err != nil {
return nil, logging.Errorf("parsePodNetworkAnnotation: %v", err)
}
networks = append(networks, &types.NetworkSelectionElement{
Name: networkName,
Namespace: netNsName,
InterfaceRequest: netIfName,
})
}
}
for _, n := range networks {
if n.Namespace == "" {
n.Namespace = defaultNamespace
}
if n.MacRequest != "" {
// validate MAC address
if _, err := net.ParseMAC(n.MacRequest); err != nil {
return nil, logging.Errorf("parsePodNetworkAnnotation: failed to mac: %v", err)
}
}
if n.InfinibandGUIDRequest != "" {
// validate GUID address
if _, err := net.ParseMAC(n.InfinibandGUIDRequest); err != nil {
return nil, logging.Errorf("parsePodNetworkAnnotation: failed to validate infiniband GUID: %v", err)
}
}
if n.IPRequest != nil {
for _, ip := range n.IPRequest {
// validate IP address
if strings.Contains(ip, "/") {
if _, _, err := net.ParseCIDR(ip); err != nil {
return nil, logging.Errorf("failed to parse CIDR %q: %v", ip, err)
}
} else if net.ParseIP(ip) == nil {
return nil, logging.Errorf("failed to parse IP address %q", ip)
}
}
}
// compatibility pre v3.2, will be removed in v4.0
if n.DeprecatedInterfaceRequest != "" && n.InterfaceRequest == "" {
n.InterfaceRequest = n.DeprecatedInterfaceRequest
}
}
return networks, nil
}
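parsePodNetworkAnnotation accepts the annotation either as a JSON list of selection objects or as a comma-delimited list of names. Both forms could look like the following illustrative values (the JSON keys follow the NetworkSelectionElement schema; the extra ips field is shown only as an assumption of what a caller might request):

package main

import "fmt"

const (
	// Comma-delimited form: "<namespace>/<name>@<ifname>" units.
	commaForm = "macvlan-conf-1, kube-system/macvlan-conf-2@net2"

	// JSON form: a list of NetworkSelectionElement objects, which also
	// allows per-attachment fields such as ips or mac.
	jsonForm = `[
  {"name": "macvlan-conf-1"},
  {"name": "macvlan-conf-2", "namespace": "kube-system", "interface": "net2", "ips": ["10.10.0.10/24"]}
]`
)

func main() {
	fmt.Println(commaForm)
	fmt.Println(jsonForm)
}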
func getKubernetesDelegate(client *ClientInfo, net *types.NetworkSelectionElement, confdir string, pod *v1.Pod, resourceMap map[string]*types.ResourceInfo) (*types.DelegateNetConf, map[string]*types.ResourceInfo, error) {
logging.Debugf("getKubernetesDelegate: %v, %v, %s, %v, %v", client, net, confdir, pod, resourceMap)
customResource, err := client.NetClient.NetworkAttachmentDefinitions(net.Namespace).Get(context.TODO(), net.Name, metav1.GetOptions{})
if err != nil {
errMsg := fmt.Sprintf("cannot find a network-attachment-definition (%s) in namespace (%s): %v", net.Name, net.Namespace, err)
if client != nil {
client.Eventf(pod, v1.EventTypeWarning, "NoNetworkFound", errMsg)
}
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: " + errMsg)
}
// Get resourceName annotation from NetworkAttachmentDefinition
deviceID := ""
resourceName, ok := customResource.GetAnnotations()[resourceNameAnnot]
if ok && pod.Name != "" && pod.Namespace != "" {
// ResourceName annotation is found; try to get device info from resourceMap
logging.Debugf("getKubernetesDelegate: found resourceName annotation : %s", resourceName)
if resourceMap == nil {
ck, err := kubeletclient.GetResourceClient("")
if err != nil {
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: failed to get a ResourceClient instance: %v", err)
}
resourceMap, err = ck.GetPodResourceMap(pod)
if err != nil {
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: failed to get resourceMap from ResourceClient: %v", err)
}
logging.Debugf("getKubernetesDelegate: resourceMap instance: %+v", resourceMap)
}
entry, ok := resourceMap[resourceName]
if ok {
if idCount := len(entry.DeviceIDs); idCount > 0 && idCount > entry.Index {
deviceID = entry.DeviceIDs[entry.Index]
logging.Debugf("getKubernetesDelegate: podName: %s deviceID: %s", pod.Name, deviceID)
entry.Index++ // increment Index for next delegate
}
}
}
configBytes, err := netutils.GetCNIConfig(customResource, confdir)
if err != nil {
return nil, resourceMap, err
}
delegate, err := types.LoadDelegateNetConf(configBytes, net, deviceID, resourceName)
if err != nil {
return nil, resourceMap, err
}
return delegate, resourceMap, nil
}
// GetK8sArgs gets k8s related args from CNI args
func GetK8sArgs(args *skel.CmdArgs) (*types.K8sArgs, error) {
k8sArgs := &types.K8sArgs{}
logging.Debugf("GetK8sArgs: %v", args)
err := cnitypes.LoadArgs(args.Args, k8sArgs)
if err != nil {
return nil, err
}
return k8sArgs, nil
}
// TryLoadPodDelegates attempts to load Kubernetes-defined delegates and add them to the Multus config.
// Returns the number of Kubernetes-defined delegates added or an error.
func TryLoadPodDelegates(pod *v1.Pod, conf *types.NetConf, clientInfo *ClientInfo, resourceMap map[string]*types.ResourceInfo) (int, *ClientInfo, error) {
var err error
logging.Debugf("TryLoadPodDelegates: %v, %v, %v", pod, conf, clientInfo)
clientInfo, err = GetK8sClient(conf.Kubeconfig, clientInfo)
if err != nil {
return 0, nil, err
}
if clientInfo == nil {
if len(conf.Delegates) == 0 {
// No available kube client and no delegates, we can't do anything
return 0, nil, logging.Errorf("TryLoadPodDelegates: must have either Kubernetes config or delegates")
}
return 0, nil, nil
}
delegate, err := tryLoadK8sPodDefaultNetwork(clientInfo, pod, conf)
if err != nil {
return 0, nil, logging.Errorf("TryLoadPodDelegates: error in loading K8s cluster default network from pod annotation: %v", err)
}
if delegate != nil {
logging.Debugf("TryLoadPodDelegates: Overwrite the cluster default network with %v from pod annotations", delegate)
conf.Delegates[0] = delegate
}
networks, err := GetPodNetwork(pod)
if networks != nil {
delegates, err := GetNetworkDelegates(clientInfo, pod, networks, conf, resourceMap)
if err != nil {
if _, ok := err.(*NoK8sNetworkError); ok {
return 0, clientInfo, nil
}
return 0, nil, logging.Errorf("TryLoadPodDelegates: error in getting k8s network for pod: %v", err)
}
if err = conf.AddDelegates(delegates); err != nil {
return 0, nil, err
}
// Check whether gatewayRequest is configured in any delegate
// and mark the configs where gateway filtering is required
isGatewayConfigured := false
for _, delegate := range conf.Delegates {
if delegate.GatewayRequest != nil {
isGatewayConfigured = true
break
}
}
if isGatewayConfigured {
types.CheckGatewayConfig(conf.Delegates)
}
return len(delegates), clientInfo, nil
}
if _, ok := err.(*NoK8sNetworkError); ok {
return 0, clientInfo, nil
}
return 0, clientInfo, err
}
// GetK8sClient gets client info from kubeconfig
func GetK8sClient(kubeconfig string, kubeClient *ClientInfo) (*ClientInfo, error) {
logging.Debugf("GetK8sClient: %s, %v", kubeconfig, kubeClient)
// If we get a valid kubeClient (eg from testcases) just return that
// one.
if kubeClient != nil {
return kubeClient, nil
}
var err error
var config *rest.Config
// Otherwise try to create a kubeClient from a given kubeConfig
if kubeconfig != "" {
// uses the current context in kubeconfig
config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, logging.Errorf("GetK8sClient: failed to get context for the kubeconfig %v: %v", kubeconfig, err)
}
} else if os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "" {
// Try in-cluster config where multus might be running in a kubernetes pod
config, err = rest.InClusterConfig()
if err != nil {
return nil, logging.Errorf("GetK8sClient: failed to get context for in-cluster kube config: %v", err)
}
} else {
// No kubernetes config; assume we shouldn't talk to Kube at all
return nil, nil
}
// Prefer the protobuf content type when talking to the apiserver
config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
config.ContentType = "application/vnd.kubernetes.protobuf"
// Set the config timeout to one minute.
config.Timeout = time.Minute
// creates the clientset
client, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
netclient, err := netclient.NewForConfig(config)
if err != nil {
return nil, err
}
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(klog.Infof)
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "multus"})
return &ClientInfo{
Client: client,
NetClient: netclient,
EventBroadcaster: broadcaster,
EventRecorder: recorder,
}, nil
}
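GetK8sClient is exported and is what pkg/multus calls at the start of every CNI operation. A hedged usage sketch follows; the kubeconfig path is the one conventionally installed by the daemonset entrypoint and is an assumption here, not something this function requires:

package main

import (
	"fmt"

	k8s "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/k8sclient"
)

func main() {
	// With an explicit kubeconfig the client is built from it; with an empty
	// string an in-cluster config is tried, and nil/nil is returned when
	// neither is available (Multus then runs with static delegates only).
	client, err := k8s.GetK8sClient("/etc/cni/net.d/multus.d/multus.kubeconfig", nil)
	if err != nil {
		fmt.Println("failed to build client:", err)
		return
	}
	if client == nil {
		fmt.Println("no kube client available, continuing without the API server")
		return
	}
	fmt.Println("client ready:", client.Client != nil)
}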
// GetPodNetwork gets net-attach-def annotation from pod
func GetPodNetwork(pod *v1.Pod) ([]*types.NetworkSelectionElement, error) {
logging.Debugf("GetPodNetwork: %v", pod)
netAnnot := pod.Annotations[networkAttachmentAnnot]
defaultNamespace := pod.ObjectMeta.Namespace
if len(netAnnot) == 0 {
return nil, &NoK8sNetworkError{"no kubernetes network found"}
}
networks, err := parsePodNetworkAnnotation(netAnnot, defaultNamespace)
if err != nil {
return nil, err
}
return networks, nil
}
// GetNetworkDelegates returns delegatenetconf from net-attach-def annotation in pod
func GetNetworkDelegates(k8sclient *ClientInfo, pod *v1.Pod, networks []*types.NetworkSelectionElement, conf *types.NetConf, resourceMap map[string]*types.ResourceInfo) ([]*types.DelegateNetConf, error) {
logging.Debugf("GetNetworkDelegates: %v, %v, %v, %v, %v", k8sclient, pod, networks, conf, resourceMap)
// Read all network objects referenced by 'networks'
var delegates []*types.DelegateNetConf
defaultNamespace := pod.ObjectMeta.Namespace
for _, net := range networks {
// The pod's namespace (stored as defaultNamespace) may differ from the annotation's target namespace (net.Namespace).
// When namespace isolation is enabled, such a mismatch is an error unless the target namespace is explicitly allowed.
if conf.NamespaceIsolation {
if defaultNamespace != net.Namespace {
// We allow exceptions based on the specified list of non-isolated namespaces (and/or "default" namespace, by default)
if !isValidNamespaceReference(net.Namespace, conf.NonIsolatedNamespaces) {
return nil, logging.Errorf("GetNetworkDelegates: namespace isolation enabled, annotation violates permission, pod is in namespace %v but refers to target namespace %v", defaultNamespace, net.Namespace)
}
}
}
delegate, updatedResourceMap, err := getKubernetesDelegate(k8sclient, net, conf.ConfDir, pod, resourceMap)
if err != nil {
return nil, logging.Errorf("GetNetworkDelegates: failed getting the delegate: %v", err)
}
delegates = append(delegates, delegate)
resourceMap = updatedResourceMap
}
return delegates, nil
}
func isValidNamespaceReference(targetns string, allowednamespaces []string) bool {
for _, eachns := range allowednamespaces {
if eachns == targetns {
return true
}
}
return false
}
func getNetDelegate(client *ClientInfo, pod *v1.Pod, netname, confdir, namespace string, resourceMap map[string]*types.ResourceInfo) (*types.DelegateNetConf, map[string]*types.ResourceInfo, error) {
logging.Debugf("getNetDelegate: %v, %v, %v, %s", client, netname, confdir, namespace)
// option1) search CRD object for the network
net := &types.NetworkSelectionElement{
Name: netname,
Namespace: namespace,
}
delegate, resourceMap, err := getKubernetesDelegate(client, net, confdir, pod, resourceMap)
if err == nil {
return delegate, resourceMap, nil
}
// option2) search CNI json config file
var configBytes []byte
configBytes, err = netutils.GetCNIConfigFromFile(netname, confdir)
if err == nil {
delegate, err := types.LoadDelegateNetConf(configBytes, nil, "", "")
if err != nil {
return nil, resourceMap, err
}
return delegate, resourceMap, nil
}
// option3) search directory
fInfo, err := os.Stat(netname)
if err == nil {
if fInfo.IsDir() {
files, err := libcni.ConfFiles(netname, []string{".conf", ".conflist"})
if err != nil {
return nil, resourceMap, err
}
if len(files) > 0 {
var configBytes []byte
configBytes, err = netutils.GetCNIConfigFromFile("", netname)
if err == nil {
delegate, err := types.LoadDelegateNetConf(configBytes, nil, "", "")
if err != nil {
return nil, resourceMap, err
}
return delegate, resourceMap, nil
}
return nil, resourceMap, err
}
}
}
return nil, resourceMap, logging.Errorf("getNetDelegate: cannot find network: %v", netname)
}
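The three fallbacks above mean that clusterNetwork (and each defaultNetworks entry) may name a net-attach-def resolved through the API server, a CNI network found in the configuration directory, or an absolute directory whose first .conf/.conflist file is used. Purely as illustration (all values are placeholders):

package main

import "fmt"

func main() {
	// option1: a NetworkAttachmentDefinition name resolved via the API server
	crdName := "default-network"

	// option2: a CNI network located in the configuration directory
	confNetName := "cbr0"

	// option3: an absolute directory; the first .conf/.conflist found there is used
	confDir := "/etc/cni/multus/net.d"

	fmt.Println(crdName, confNetName, confDir)
}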
// GetDefaultNetworks parses 'defaultNetwork' config, gets network json and put it into netconf.Delegates.
func GetDefaultNetworks(pod *v1.Pod, conf *types.NetConf, kubeClient *ClientInfo, resourceMap map[string]*types.ResourceInfo) (map[string]*types.ResourceInfo, error) {
logging.Debugf("GetDefaultNetworks: %v, %v, %v, %v", pod, conf, kubeClient, resourceMap)
var delegates []*types.DelegateNetConf
kubeClient, err := GetK8sClient(conf.Kubeconfig, kubeClient)
if err != nil {
return resourceMap, err
}
if kubeClient == nil {
if len(conf.Delegates) == 0 {
// No available kube client and no delegates, we can't do anything
return resourceMap, logging.Errorf("GetDefaultNetworks: must have either Kubernetes config or delegates")
}
return resourceMap, nil
}
delegate, resourceMap, err := getNetDelegate(kubeClient, pod, conf.ClusterNetwork, conf.ConfDir, conf.MultusNamespace, resourceMap)
if err != nil {
return resourceMap, logging.Errorf("GetDefaultNetworks: failed to get clusterNetwork %s in namespace %s", conf.ClusterNetwork, conf.MultusNamespace)
}
delegate.MasterPlugin = true
delegates = append(delegates, delegate)
// Pods in system namespaces do not get the additional default networks for now.
if !types.CheckSystemNamespaces(pod.ObjectMeta.Namespace, conf.SystemNamespaces) {
for _, netname := range conf.DefaultNetworks {
delegate, resourceMap, err := getNetDelegate(kubeClient, pod, netname, conf.ConfDir, conf.MultusNamespace, resourceMap)
if err != nil {
return resourceMap, err
}
delegates = append(delegates, delegate)
}
}
if err = conf.AddDelegates(delegates); err != nil {
return resourceMap, err
}
return resourceMap, nil
}
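GetDefaultNetworks is driven by the ClusterNetwork, DefaultNetworks and SystemNamespaces fields of the Multus NetConf. A sketch of a matching daemon configuration, assuming the conventional lowerCamel JSON keys for those fields, might look like this:

package main

import "fmt"

func main() {
	// Illustrative Multus configuration exercising the code path above: the
	// cluster-wide default comes from "clusterNetwork", extra always-on
	// attachments from "defaultNetworks", and pods in "systemNamespaces"
	// skip the defaultNetworks list.
	conf := `{
  "cniVersion": "0.3.1",
  "name": "multus-cni-network",
  "type": "multus",
  "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig",
  "clusterNetwork": "default-network",
  "defaultNetworks": ["monitoring-net"],
  "systemNamespaces": ["kube-system"],
  "multusNamespace": "kube-system"
}`
	fmt.Println(conf)
}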
// tryLoadK8sPodDefaultNetwork get pod default network from annotations
func tryLoadK8sPodDefaultNetwork(kubeClient *ClientInfo, pod *v1.Pod, conf *types.NetConf) (*types.DelegateNetConf, error) {
var netAnnot string
logging.Debugf("tryLoadK8sPodDefaultNetwork: %v, %v, %v", kubeClient, pod, conf)
netAnnot, ok := pod.Annotations[defaultNetAnnot]
if !ok {
logging.Debugf("tryLoadK8sPodDefaultNetwork: Pod default network annotation is not defined")
return nil, nil
}
// The CRD object of default network should only be defined in multusNamespace
networks, err := parsePodNetworkAnnotation(netAnnot, conf.MultusNamespace)
if err != nil {
return nil, logging.Errorf("tryLoadK8sPodDefaultNetwork: failed to parse CRD object: %v", err)
}
if len(networks) > 1 {
return nil, logging.Errorf("tryLoadK8sPodDefaultNetwork: more than one default network is specified: %s", netAnnot)
}
delegate, _, err := getKubernetesDelegate(kubeClient, networks[0], conf.ConfDir, pod, nil)
if err != nil {
return nil, logging.Errorf("tryLoadK8sPodDefaultNetwork: failed getting the delegate: %v", err)
}
delegate.MasterPlugin = true
return delegate, nil
}


@@ -1,57 +1,56 @@
package kubeletclient
import (
"net/url"
"os"
"path/filepath"
"time"
"github.com/intel/multus-cni/checkpoint"
"github.com/intel/multus-cni/logging"
"github.com/intel/multus-cni/types"
"golang.org/x/net/context"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/checkpoint"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
v1 "k8s.io/api/core/v1"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/util"
)
const (
defaultKubeletSocketFile = "kubelet.sock"
defaultPodResourcesMaxSize = 1024 * 1024 * 16 // 16 Mb
)
var (
kubeletSocket string
defaultPodResourcesPath = "/var/lib/kubelet/pod-resources"
defaultPodResourcesPath = "/var/lib/kubelet/pod-resources"
)
// GetResourceClient returns an instance of ResourceClient interface initialized with Pod resource information
func GetResourceClient() (types.ResourceClient, error) {
func GetResourceClient(kubeletSocket string) (types.ResourceClient, error) {
if kubeletSocket == "" {
kubeletSocket, _ = util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
}
// If Kubelet resource API endpoint exist use that by default
// Or else fallback with checkpoint file
if hasKubeletAPIEndpoint() {
logging.Verbosef("GetResourceClient(): using Kubelet resource API endpoint")
return getKubeletClient()
if hasKubeletAPIEndpoint(kubeletSocket) {
logging.Debugf("GetResourceClient: using Kubelet resource API endpoint")
return getKubeletClient(kubeletSocket)
}
logging.Verbosef("GetResourceClient(): using Kubelet device plugin checkpoint")
logging.Debugf("GetResourceClient: using Kubelet device plugin checkpoint")
return checkpoint.GetCheckpoint()
}
func getKubeletClient() (types.ResourceClient, error) {
func getKubeletClient(kubeletSocket string) (types.ResourceClient, error) {
newClient := &kubeletClient{}
if kubeletSocket == "" {
kubeletSocket = util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
kubeletSocket, _ = util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
}
client, conn, err := podresources.GetClient(kubeletSocket, 10*time.Second, defaultPodResourcesMaxSize)
client, conn, err := podresources.GetV1Client(kubeletSocket, 10*time.Second, defaultPodResourcesMaxSize)
if err != nil {
return nil, logging.Errorf("GetResourceClient(): error getting grpc client: %v\n", err)
return nil, logging.Errorf("getKubeletClient: error getting grpc client: %v\n", err)
}
defer conn.Close()
if err := newClient.getPodResources(client); err != nil {
return nil, logging.Errorf("GetResourceClient(): error getting resource client: %v\n", err)
return nil, logging.Errorf("getKubeletClient: error getting pod resources from client: %v\n", err)
}
return newClient, nil
@@ -68,7 +67,7 @@ func (rc *kubeletClient) getPodResources(client podresourcesapi.PodResourcesList
resp, err := client.List(ctx, &podresourcesapi.ListPodResourcesRequest{})
if err != nil {
return logging.Errorf("getPodResources(): %v.Get(_) = _, %v", client, err)
return logging.Errorf("getPodResources: failed to list pod resources, %v.Get(_) = _, %v", client, err)
}
rc.resources = resp.PodResources
@@ -83,7 +82,7 @@ func (rc *kubeletClient) GetPodResourceMap(pod *v1.Pod) (map[string]*types.Resou
ns := pod.Namespace
if name == "" || ns == "" {
return nil, logging.Errorf("GetPodResourcesMap(): Pod name or namespace cannot be empty")
return nil, logging.Errorf("GetPodResourcesMap: Pod name or namespace cannot be empty")
}
for _, pr := range rc.resources {
@@ -102,11 +101,14 @@ func (rc *kubeletClient) GetPodResourceMap(pod *v1.Pod) (map[string]*types.Resou
return resourceMap, nil
}
func hasKubeletAPIEndpoint() bool {
func hasKubeletAPIEndpoint(endpoint string) bool {
u, err := url.Parse(endpoint)
if err != nil {
return false
}
// Check for kubelet resource API socket file
kubeletAPISocket := filepath.Join(defaultPodResourcesPath, defaultKubeletSocketFile)
if _, err := os.Stat(kubeletAPISocket); err != nil {
logging.Verbosef("hasKubeletAPIEndpoint(): error looking up kubelet resource api socket file: %q", err)
if _, err := os.Stat(u.Path); err != nil {
logging.Debugf("hasKubeletAPIEndpoint: error looking up kubelet resource api socket file: %q", err)
return false
}
return true
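With this change the caller passes the kubelet pod-resources endpoint explicitly, or an empty string to fall back to the default under /var/lib/kubelet/pod-resources. A hedged usage sketch (the explicit unix endpoint is an assumption for illustration):

package main

import (
	"fmt"

	"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/kubeletclient"
)

func main() {
	// Empty string: use the default kubelet pod-resources endpoint, or fall
	// back to the device-plugin checkpoint file if the socket is absent.
	rc, err := kubeletclient.GetResourceClient("")
	if err != nil {
		fmt.Println("resource client error:", err)
		return
	}
	fmt.Printf("resource client ready: %T\n", rc)

	// An explicit unix endpoint can also be passed, e.g. from a CLI flag.
	_, _ = kubeletclient.GetResourceClient("unix:///var/lib/kubelet/pod-resources/kubelet.sock")
}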


@@ -15,8 +15,8 @@ import (
k8sTypes "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/kubelet/util"
mtypes "github.com/intel/multus-cni/types"
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
mtypes "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
)
var (
@@ -29,6 +29,13 @@ type fakeResourceServer struct {
server *grpc.Server
}
/* This is for 1.21.x or later. Uncomment it once we update vendor here!
//TODO: This is stub code for tests; it may need to change once the tests actually exercise this API in the future...
func (m *fakeResourceServer) GetAllocatableResources(ctx context.Context, req *podresourcesapi.AllocatableResourcesRequest) (*podresourcesapi.AllocatableResourcesResponse, error) {
return &podresourcesapi.AllocatableResourcesResponse{}, nil
}
*/
func (m *fakeResourceServer) List(ctx context.Context, req *podresourcesapi.ListPodResourcesRequest) (*podresourcesapi.ListPodResourcesResponse, error) {
podName := "pod-name"
podNamespace := "pod-namespace"
@@ -63,25 +70,27 @@ func TestKubeletclient(t *testing.T) {
RunSpecs(t, "Kubeletclient Suite")
}
var testKubeletSocket string
func setUp() error {
tempSocketDir, err := ioutil.TempDir("", "kubelet-resource-client")
if err != nil {
return err
}
defaultPodResourcesPath = filepath.Join(tempSocketDir, defaultPodResourcesPath)
testingPodResourcesPath := filepath.Join(tempSocketDir, defaultPodResourcesPath)
if err := os.MkdirAll(defaultPodResourcesPath, os.ModeDir); err != nil {
if err := os.MkdirAll(testingPodResourcesPath, os.ModeDir); err != nil {
return err
}
socketDir = defaultPodResourcesPath
socketDir = testingPodResourcesPath
socketName = filepath.Join(socketDir, "kubelet.sock")
testKubeletSocket = socketName
fakeServer = &fakeResourceServer{server: grpc.NewServer()}
podresourcesapi.RegisterPodResourcesListerServer(fakeServer.server, fakeServer)
lis, err := util.CreateListener(socketName)
if err != nil {
return nil
return err
}
go fakeServer.server.Serve(lis)
return nil
@@ -91,10 +100,8 @@ func tearDown(path string) error {
if fakeServer != nil {
fakeServer.server.Stop()
}
if err := os.RemoveAll(path); err != nil {
return err
}
return nil
err := os.RemoveAll(path)
return err
}
var _ = BeforeSuite(func() {
@@ -111,14 +118,12 @@ var _ = Describe("Kubelet resource endpoint data read operations", func() {
Context("GetResourceClient()", func() {
It("should return no error", func() {
kubeletSocket = socketName
_, err := GetResourceClient()
_, err := GetResourceClient(testKubeletSocket)
Expect(err).NotTo(HaveOccurred())
})
It("should fail with missing file", func() {
kubeletSocket = "sampleSocketString"
_, err := GetResourceClient()
_, err := GetResourceClient("sampleSocketString")
Expect(err).To(HaveOccurred())
})
})
@@ -140,41 +145,11 @@ var _ = Describe("Kubelet resource endpoint data read operations", func() {
},
},
}
kubeletSocket = socketName
client, err := getKubeletClient()
client, err := getKubeletClient(testKubeletSocket)
Expect(err).NotTo(HaveOccurred())
outputRMap := map[string]*mtypes.ResourceInfo{
"resource": &mtypes.ResourceInfo{DeviceIDs: []string{"dev0", "dev1"}},
}
resourceMap, err := client.GetPodResourceMap(fakePod)
Expect(err).NotTo(HaveOccurred())
Expect(resourceMap).ShouldNot(BeNil())
Expect(resourceMap).To(Equal(outputRMap))
})
It("should return no error with empty socket", func() {
podUID := k8sTypes.UID("970a395d-bb3b-11e8-89df-408d5c537d23")
fakePod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-name",
Namespace: "pod-namespace",
UID: podUID,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container-name",
},
},
},
}
kubeletSocket = ""
client, err := getKubeletClient()
Expect(err).NotTo(HaveOccurred())
outputRMap := map[string]*mtypes.ResourceInfo{
"resource": &mtypes.ResourceInfo{DeviceIDs: []string{"dev0", "dev1"}},
"resource": {DeviceIDs: []string{"dev0", "dev1"}},
}
resourceMap, err := client.GetPodResourceMap(fakePod)
Expect(err).NotTo(HaveOccurred())
@@ -183,8 +158,7 @@ var _ = Describe("Kubelet resource endpoint data read operations", func() {
})
It("should return an error with garbage socket value", func() {
kubeletSocket = "/badfilepath!?//"
_, err := getKubeletClient()
_, err := getKubeletClient("/badfilepath!?//")
Expect(err).To(HaveOccurred())
})
})
@@ -199,8 +173,7 @@ var _ = Describe("Kubelet resource endpoint data read operations", func() {
UID: podUID,
},
}
kubeletSocket = socketName
client, err := getKubeletClient()
client, err := getKubeletClient(testKubeletSocket)
Expect(err).NotTo(HaveOccurred())
_, err = client.GetPodResourceMap(fakePod)
Expect(err).To(HaveOccurred())
@@ -217,8 +190,7 @@ var _ = Describe("Kubelet resource endpoint data read operations", func() {
UID: podUID,
},
}
kubeletSocket = socketName
client, err := getKubeletClient()
client, err := getKubeletClient(testKubeletSocket)
Expect(err).NotTo(HaveOccurred())
_, err = client.GetPodResourceMap(fakePod)
Expect(err).To(HaveOccurred())
@@ -236,8 +208,7 @@ var _ = Describe("Kubelet resource endpoint data read operations", func() {
},
}
kubeletSocket = socketName
client, err := getKubeletClient()
client, err := getKubeletClient(testKubeletSocket)
Expect(err).NotTo(HaveOccurred())
emptyRMap := map[string]*mtypes.ResourceInfo{}


@@ -16,11 +16,13 @@ package logging
import (
"fmt"
"io"
"os"
"strings"
"time"
"github.com/pkg/errors"
lumberjack "gopkg.in/natefinch/lumberjack.v2"
)
// Level type
@@ -37,7 +39,7 @@ const (
)
var loggingStderr bool
var loggingFp *os.File
var loggingW io.Writer
var loggingLevel Level
const defaultTimestampFormat = time.RFC3339
@@ -69,10 +71,10 @@ func printf(level Level, format string, a ...interface{}) {
fmt.Fprintf(os.Stderr, "\n")
}
if loggingFp != nil {
fmt.Fprintf(loggingFp, header, t.Format(defaultTimestampFormat), level)
fmt.Fprintf(loggingFp, format, a...)
fmt.Fprintf(loggingFp, "\n")
if loggingW != nil {
fmt.Fprintf(loggingW, header, t.Format(defaultTimestampFormat), level)
fmt.Fprintf(loggingW, format, a...)
fmt.Fprintf(loggingW, "\n")
}
}
@@ -92,7 +94,7 @@ func Errorf(format string, a ...interface{}) error {
return fmt.Errorf(format, a...)
}
// Panicf prints logging plus stack trace. This should be used only for unrecoverble error
// Panicf prints logging plus stack trace. This should be used only for unrecoverable error
func Panicf(format string, a ...interface{}) {
printf(PanicLevel, format, a...)
printf(PanicLevel, "========= Stack trace output ========")
@@ -139,16 +141,18 @@ func SetLogFile(filename string) {
return
}
fp, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
loggingFp = nil
fmt.Fprintf(os.Stderr, "multus logging: cannot open %s", filename)
loggingW = &lumberjack.Logger{
Filename: filename,
MaxSize: 100, // megabytes
MaxBackups: 5,
MaxAge: 5, // days
Compress: true,
}
loggingFp = fp
}
func init() {
loggingStderr = true
loggingFp = nil
loggingW = nil
loggingLevel = PanicLevel
}
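Replacing the raw *os.File with an io.Writer lets SetLogFile delegate rotation to lumberjack. A standalone sketch of the same writer configuration (the log path is a placeholder):

package main

import (
	"fmt"
	"time"

	lumberjack "gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	// Same rotation policy as SetLogFile above: rotate at 100 MB, keep at
	// most 5 compressed backups, and drop backups older than 5 days.
	w := &lumberjack.Logger{
		Filename:   "/var/log/multus.log",
		MaxSize:    100,
		MaxBackups: 5,
		MaxAge:     5,
		Compress:   true,
	}
	defer w.Close()

	fmt.Fprintf(w, "%s [verbose] multus logging writer initialized\n", time.Now().Format(time.RFC3339))
}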


@@ -30,25 +30,25 @@ var _ = Describe("logging operations", func() {
BeforeEach(func() {
loggingStderr = false
loggingFp = nil
loggingW = nil
loggingLevel = PanicLevel
})
It("Check file setter with empty", func() {
SetLogFile("")
Expect(loggingFp).To(BeNil())
Expect(loggingW).To(BeNil())
})
It("Check file setter with empty", func() {
SetLogFile("/tmp/foobar.logging")
Expect(loggingFp).NotTo(Equal(nil))
// check file existance
Expect(loggingW).NotTo(Equal(nil))
// check file existence
})
It("Check file setter with bad filepath", func() {
SetLogFile("/invalid/filepath")
Expect(loggingFp).NotTo(Equal(nil))
// check file existance
Expect(loggingW).NotTo(Equal(nil))
// check file existence
})
It("Check loglevel setter", func() {

pkg/multus/multus.go (new file, 871 lines)

@@ -0,0 +1,871 @@
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package multus
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"strings"
"time"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/invoke"
"github.com/containernetworking/cni/pkg/skel"
cnitypes "github.com/containernetworking/cni/pkg/types"
cnicurrent "github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
nadutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils"
"github.com/vishvananda/netlink"
k8s "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/k8sclient"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/netutils"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
k8snet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
)
const (
shortPollDuration = 250 * time.Millisecond
shortPollTimeout = 2500 * time.Millisecond
)
var (
version = "master@git"
commit = "unknown commit"
date = "unknown date"
)
var (
pollDuration = 1000 * time.Millisecond
pollTimeout = 45 * time.Second
)
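These two interval/timeout pairs feed wait.PollImmediate later in this file: the short pair (250 ms, up to 2.5 s) retries pod retrieval on transient apiserver errors, and the long pair (1 s, up to 45 s) waits for the readiness indicator file. A minimal standalone sketch of the same polling pattern (the file path is a hypothetical readiness indicator):

package main

import (
	"fmt"
	"os"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	readinessFile := "/run/flannel/subnet.env" // hypothetical readiness indicator

	// Poll once immediately, then every second, giving up after 45 seconds.
	err := wait.PollImmediate(1000*time.Millisecond, 45*time.Second, func() (bool, error) {
		_, statErr := os.Stat(readinessFile)
		return statErr == nil, nil
	})
	if err != nil {
		fmt.Println("readiness file never appeared:", err)
		return
	}
	fmt.Println("default network reports ready")
}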
//PrintVersionString ...
func PrintVersionString() string {
return fmt.Sprintf("multus-cni version:%s, commit:%s, date:%s",
version, commit, date)
}
func saveScratchNetConf(containerID, dataDir string, netconf []byte) error {
logging.Debugf("saveScratchNetConf: %s, %s, %s", containerID, dataDir, string(netconf))
if err := os.MkdirAll(dataDir, 0700); err != nil {
return logging.Errorf("saveScratchNetConf: failed to create the multus data directory(%q): %v", dataDir, err)
}
path := filepath.Join(dataDir, containerID)
err := ioutil.WriteFile(path, netconf, 0600)
if err != nil {
return logging.Errorf("saveScratchNetConf: failed to write container data in the path(%q): %v", path, err)
}
return err
}
func consumeScratchNetConf(containerID, dataDir string) ([]byte, string, error) {
logging.Debugf("consumeScratchNetConf: %s, %s", containerID, dataDir)
path := filepath.Join(dataDir, containerID)
b, err := ioutil.ReadFile(path)
return b, path, err
}
func getIfname(delegate *types.DelegateNetConf, argif string, idx int) string {
logging.Debugf("getIfname: %v, %s, %d", delegate, argif, idx)
if delegate.IfnameRequest != "" {
return delegate.IfnameRequest
}
if delegate.MasterPlugin {
// master plugin always uses the CNI-provided interface name
return argif
}
// Otherwise construct a unique interface name from the delegate's
// position in the delegate list
return fmt.Sprintf("net%d", idx)
}
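getIfname keeps the runtime-provided name for the master plugin and numbers every other delegate by its position unless an interface name was requested. A tiny sketch of the resulting names (values are illustrative):

package main

import "fmt"

func main() {
	// Mirrors getIfname above for a pod with three delegates and no explicit
	// interface requests: the master keeps the CNI-provided name, the rest
	// are numbered by their position in the delegate list.
	argIf := "eth0"
	names := []string{argIf}       // idx 0: master plugin
	for idx := 1; idx < 3; idx++ { // idx 1..2: additional delegates
		names = append(names, fmt.Sprintf("net%d", idx))
	}
	fmt.Println(names) // [eth0 net1 net2]
}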
func getDelegateDeviceInfo(delegate *types.DelegateNetConf, runtimeConf *libcni.RuntimeConf) (*nettypes.DeviceInfo, error) {
// If the DPDeviceInfoFile was created, it was copied to the CNIDeviceInfoFile.
// If the DPDeviceInfoFile was not created, CNI might have created it. So
// either way, load CNIDeviceInfoFile.
if info, ok := runtimeConf.CapabilityArgs["CNIDeviceInfoFile"]; ok {
if infostr, ok := info.(string); ok {
return nadutils.LoadDeviceInfoFromCNI(infostr)
}
} else {
logging.Debugf("getDelegateDeviceInfo(): No CapArgs - info=%v ok=%v", info, ok)
}
return nil, nil
}
func saveDelegates(containerID, dataDir string, delegates []*types.DelegateNetConf) error {
logging.Debugf("saveDelegates: %s, %s, %v", containerID, dataDir, delegates)
delegatesBytes, err := json.Marshal(delegates)
if err != nil {
return logging.Errorf("saveDelegates: error serializing delegate netconf: %v", err)
}
if err = saveScratchNetConf(containerID, dataDir, delegatesBytes); err != nil {
return logging.Errorf("saveDelegates: error in saving the delegates : %v", err)
}
return err
}
func deleteDelegates(containerID, dataDir string) error {
logging.Debugf("deleteDelegates: %s, %s", containerID, dataDir)
path := filepath.Join(dataDir, containerID)
if err := os.Remove(path); err != nil {
return logging.Errorf("deleteDelegates: error in deleting the delegates : %v", err)
}
return nil
}
func validateIfName(nsname string, ifname string) error {
logging.Debugf("validateIfName: %s, %s", nsname, ifname)
podNs, err := ns.GetNS(nsname)
if err != nil {
return logging.Errorf("validateIfName: no net namespace %s found: %v", nsname, err)
}
err = podNs.Do(func(_ ns.NetNS) error {
_, err := netlink.LinkByName(ifname)
if err != nil {
if err.Error() == "Link not found" {
return nil
}
return err
}
return logging.Errorf("validateIfName: interface name %s already exists", ifname)
})
return err
}
func confAdd(rt *libcni.RuntimeConf, rawNetconf []byte, multusNetconf *types.NetConf, exec invoke.Exec) (cnitypes.Result, error) {
logging.Debugf("confAdd: %v, %s", rt, string(rawNetconf))
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
conf, err := libcni.ConfFromBytes(rawNetconf)
if err != nil {
return nil, logging.Errorf("error in converting the raw bytes to conf: %v", err)
}
result, err := cniNet.AddNetwork(context.Background(), conf, rt)
if err != nil {
return nil, err
}
return result, nil
}
func confCheck(rt *libcni.RuntimeConf, rawNetconf []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
logging.Debugf("confCheck: %v, %s", rt, string(rawNetconf))
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
conf, err := libcni.ConfFromBytes(rawNetconf)
if err != nil {
return logging.Errorf("error in converting the raw bytes to conf: %v", err)
}
err = cniNet.CheckNetwork(context.Background(), conf, rt)
if err != nil {
return logging.Errorf("error in getting result from DelNetwork: %v", err)
}
return err
}
func confDel(rt *libcni.RuntimeConf, rawNetconf []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
logging.Debugf("conflistDel: %v, %s", rt, string(rawNetconf))
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
conf, err := libcni.ConfFromBytes(rawNetconf)
if err != nil {
return logging.Errorf("error in converting the raw bytes to conf: %v", err)
}
err = cniNet.DelNetwork(context.Background(), conf, rt)
if err != nil {
return logging.Errorf("error in getting result from DelNetwork: %v", err)
}
return err
}
func conflistAdd(rt *libcni.RuntimeConf, rawnetconflist []byte, multusNetconf *types.NetConf, exec invoke.Exec) (cnitypes.Result, error) {
logging.Debugf("conflistAdd: %v, %s", rt, string(rawnetconflist))
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
return nil, logging.Errorf("conflistAdd: error converting the raw bytes into a conflist: %v", err)
}
result, err := cniNet.AddNetworkList(context.Background(), confList, rt)
if err != nil {
return nil, err
}
return result, nil
}
func conflistCheck(rt *libcni.RuntimeConf, rawnetconflist []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
logging.Debugf("conflistCheck: %v, %s", rt, string(rawnetconflist))
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
return logging.Errorf("conflistCheck: error converting the raw bytes into a conflist: %v", err)
}
err = cniNet.CheckNetworkList(context.Background(), confList, rt)
if err != nil {
return logging.Errorf("conflistCheck: error in getting result from CheckNetworkList: %v", err)
}
return err
}
func conflistDel(rt *libcni.RuntimeConf, rawnetconflist []byte, multusNetconf *types.NetConf, exec invoke.Exec) error {
logging.Debugf("conflistDel: %v, %s", rt, string(rawnetconflist))
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{multusNetconf.BinDir}, binDirs...)
cniNet := libcni.NewCNIConfigWithCacheDir(binDirs, multusNetconf.CNIDir, exec)
confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
return logging.Errorf("conflistDel: error converting the raw bytes into a conflist: %v", err)
}
err = cniNet.DelNetworkList(context.Background(), confList, rt)
if err != nil {
return logging.Errorf("conflistDel: error in getting result from DelNetworkList: %v", err)
}
return err
}
func delegateAdd(exec invoke.Exec, kubeClient *k8s.ClientInfo, pod *v1.Pod, ifName string, delegate *types.DelegateNetConf, rt *libcni.RuntimeConf, multusNetconf *types.NetConf, cniArgs string) (cnitypes.Result, error) {
logging.Debugf("delegateAdd: %v, %s, %v, %v", exec, ifName, delegate, rt)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return nil, logging.Errorf("delegateAdd: error setting environment variable CNI_IFNAME")
}
if err := validateIfName(os.Getenv("CNI_NETNS"), ifName); err != nil {
return nil, logging.Errorf("delegateAdd: cannot set %q interface name to %q: %v", delegate.Conf.Type, ifName, err)
}
// Deprecated in ver 3.5.
if delegate.MacRequest != "" || delegate.IPRequest != nil {
if cniArgs != "" {
cniArgs = fmt.Sprintf("%s;IgnoreUnknown=true", cniArgs)
} else {
cniArgs = "IgnoreUnknown=true"
}
if delegate.MacRequest != "" {
// validate Mac address
_, err := net.ParseMAC(delegate.MacRequest)
if err != nil {
return nil, logging.Errorf("delegateAdd: failed to parse mac address %q", delegate.MacRequest)
}
cniArgs = fmt.Sprintf("%s;MAC=%s", cniArgs, delegate.MacRequest)
logging.Debugf("delegateAdd: set MAC address %q to %q", delegate.MacRequest, ifName)
rt.Args = append(rt.Args, [2]string{"MAC", delegate.MacRequest})
}
if delegate.IPRequest != nil {
// validate IP address
for _, ip := range delegate.IPRequest {
if strings.Contains(ip, "/") {
_, _, err := net.ParseCIDR(ip)
if err != nil {
return nil, logging.Errorf("delegateAdd: failed to parse IP address %q", ip)
}
} else if net.ParseIP(ip) == nil {
return nil, logging.Errorf("delegateAdd: failed to parse IP address %q", ip)
}
}
ips := strings.Join(delegate.IPRequest, ",")
cniArgs = fmt.Sprintf("%s;IP=%s", cniArgs, ips)
logging.Debugf("delegateAdd: set IP address %q to %q", ips, ifName)
rt.Args = append(rt.Args, [2]string{"IP", ips})
}
}
var result cnitypes.Result
var err error
if delegate.ConfListPlugin {
result, err = conflistAdd(rt, delegate.Bytes, multusNetconf, exec)
if err != nil {
return nil, err
}
} else {
result, err = confAdd(rt, delegate.Bytes, multusNetconf, exec)
if err != nil {
return nil, err
}
}
if logging.GetLoggingLevel() >= logging.VerboseLevel {
data, _ := json.Marshal(result)
var cniConfName string
if delegate.ConfListPlugin {
cniConfName = delegate.ConfList.Name
} else {
cniConfName = delegate.Conf.Name
}
podUID := "unknownUID"
if pod != nil {
podUID = string(pod.ObjectMeta.UID)
}
logging.Verbosef("Add: %s:%s:%s:%s(%s):%s %s", rt.Args[1][1], rt.Args[2][1], podUID, delegate.Name, cniConfName, rt.IfName, string(data))
}
// get IP addresses from result
ips := []string{}
res, err := cnicurrent.NewResultFromResult(result)
if err != nil {
logging.Errorf("delegateAdd: error converting result: %v", err)
return result, nil
}
for _, ip := range res.IPs {
ips = append(ips, ip.Address.String())
}
if pod != nil {
// send kubernetes events
if delegate.Name != "" {
kubeClient.Eventf(pod, v1.EventTypeNormal, "AddedInterface", "Add %s %v from %s", rt.IfName, ips, delegate.Name)
} else {
kubeClient.Eventf(pod, v1.EventTypeNormal, "AddedInterface", "Add %s %v", rt.IfName, ips)
}
} else {
// for further debug https://github.com/k8snetworkplumbingwg/multus-cni/issues/481
logging.Errorf("delegateAdd: pod nil pointer: namespace: %s, name: %s, container id: %s, pod: %v", rt.Args[1][1], rt.Args[2][1], rt.Args[3][1], pod)
}
return result, nil
}
func delegateCheck(exec invoke.Exec, ifName string, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, multusNetconf *types.NetConf) error {
logging.Debugf("delegateCheck: %v, %s, %v, %v", exec, ifName, delegateConf, rt)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return logging.Errorf("delegateCheck: error setting environment variable CNI_IFNAME")
}
if logging.GetLoggingLevel() >= logging.VerboseLevel {
var cniConfName string
if delegateConf.ConfListPlugin {
cniConfName = delegateConf.ConfList.Name
} else {
cniConfName = delegateConf.Conf.Name
}
logging.Verbosef("Check: %s:%s:%s(%s):%s %s", rt.Args[1][1], rt.Args[2][1], delegateConf.Name, cniConfName, rt.IfName, string(delegateConf.Bytes))
}
var err error
if delegateConf.ConfListPlugin {
err = conflistCheck(rt, delegateConf.Bytes, multusNetconf, exec)
if err != nil {
return logging.Errorf("delegateCheck: error invoking ConflistCheck - %q: %v", delegateConf.ConfList.Name, err)
}
} else {
err = confCheck(rt, delegateConf.Bytes, multusNetconf, exec)
if err != nil {
return logging.Errorf("delegateCheck: error invoking DelegateCheck - %q: %v", delegateConf.Conf.Type, err)
}
}
return err
}
func delegateDel(exec invoke.Exec, pod *v1.Pod, ifName string, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, multusNetconf *types.NetConf) error {
logging.Debugf("delegateDel: %v, %v, %s, %v, %v", exec, pod, ifName, delegateConf, rt)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return logging.Errorf("delegateDel: error setting environment variable CNI_IFNAME")
}
if logging.GetLoggingLevel() >= logging.VerboseLevel {
var confName string
if delegateConf.ConfListPlugin {
confName = delegateConf.ConfList.Name
} else {
confName = delegateConf.Conf.Name
}
podUID := "unknownUID"
if pod != nil {
podUID = string(pod.ObjectMeta.UID)
}
logging.Verbosef("Del: %s:%s:%s:%s:%s %s", rt.Args[1][1], rt.Args[2][1], podUID, confName, rt.IfName, string(delegateConf.Bytes))
}
var err error
if delegateConf.ConfListPlugin {
err = conflistDel(rt, delegateConf.Bytes, multusNetconf, exec)
if err != nil {
return logging.Errorf("delegateDel: error invoking ConflistDel - %q: %v", delegateConf.ConfList.Name, err)
}
} else {
err = confDel(rt, delegateConf.Bytes, multusNetconf, exec)
if err != nil {
return logging.Errorf("delegateDel: error invoking DelegateDel - %q: %v", delegateConf.Conf.Type, err)
}
}
return err
}
// delPlugins deletes plugins in reverse order from lastIdx
// Uses netRt as base RuntimeConf (coming from NetConf) but merges it
// with each of the delegates' configuration
func delPlugins(exec invoke.Exec, pod *v1.Pod, args *skel.CmdArgs, k8sArgs *types.K8sArgs, delegates []*types.DelegateNetConf, lastIdx int, netRt *types.RuntimeConfig, multusNetconf *types.NetConf) error {
logging.Debugf("delPlugins: %v, %v, %v, %v, %v, %d, %v", exec, pod, args, k8sArgs, delegates, lastIdx, netRt)
if os.Setenv("CNI_COMMAND", "DEL") != nil {
return logging.Errorf("delPlugins: error setting environment variable CNI_COMMAND to a value of DEL")
}
var errorstrings []string
for idx := lastIdx; idx >= 0; idx-- {
ifName := getIfname(delegates[idx], args.IfName, idx)
rt, cniDeviceInfoPath := types.CreateCNIRuntimeConf(args, k8sArgs, ifName, netRt, delegates[idx])
// Attempt to delete all but do not error out, instead, collect all errors.
if err := delegateDel(exec, pod, ifName, delegates[idx], rt, multusNetconf); err != nil {
errorstrings = append(errorstrings, err.Error())
}
if cniDeviceInfoPath != "" {
err := nadutils.CleanDeviceInfoForCNI(cniDeviceInfoPath)
// Even if the filename is set, file may not be present. Ignore error,
// but log and in the future may need to filter on specific errors.
if err != nil {
logging.Debugf("delPlugins: CleanDeviceInfoForCNI returned an error - err=%v", err)
}
}
}
// Check if we had any errors, and send them all back.
if len(errorstrings) > 0 {
return fmt.Errorf("%s", strings.Join(errorstrings, " / "))
}
return nil
}
func cmdErr(k8sArgs *types.K8sArgs, format string, args ...interface{}) error {
prefix := "Multus: "
if k8sArgs != nil {
prefix += fmt.Sprintf("[%s/%s]: ", k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_NAME)
}
return logging.Errorf(prefix+format, args...)
}
func cmdPluginErr(k8sArgs *types.K8sArgs, confName string, format string, args ...interface{}) error {
msg := ""
if k8sArgs != nil {
msg += fmt.Sprintf("[%s/%s:%s]: ", k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_NAME, confName)
}
return logging.Errorf(msg+format, args...)
}
func isCriticalRequestRetriable(err error) bool {
logging.Debugf("isCriticalRequestRetriable: %v", err)
errorTypesAllowingRetry := []func(error) bool{
errors.IsServiceUnavailable, errors.IsInternalError, k8snet.IsConnectionReset, k8snet.IsConnectionRefused}
for _, f := range errorTypesAllowingRetry {
if f(err) {
return true
}
}
return false
}
//CmdAdd ...
func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (cnitypes.Result, error) {
n, err := types.LoadNetConf(args.StdinData)
logging.Debugf("CmdAdd: %v, %v, %v", args, exec, kubeClient)
if err != nil {
return nil, cmdErr(nil, "error loading netconf: %v", err)
}
kubeClient, err = k8s.GetK8sClient(n.Kubeconfig, kubeClient)
if err != nil {
return nil, cmdErr(nil, "error getting k8s client: %v", err)
}
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return nil, cmdErr(nil, "error getting k8s args: %v", err)
}
if n.ReadinessIndicatorFile != "" {
err := wait.PollImmediate(pollDuration, pollTimeout, func() (bool, error) {
_, err := os.Stat(n.ReadinessIndicatorFile)
return err == nil, nil
})
if err != nil {
return nil, cmdErr(k8sArgs, "have you checked that your default network is ready? still waiting for readinessindicatorfile @ %v. pollimmediate error: %v", n.ReadinessIndicatorFile, err)
}
}
pod := (*v1.Pod)(nil)
if kubeClient != nil {
pod, err = kubeClient.GetPod(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
if err != nil {
var waitErr error
// in case of a retriable error, retry 10 times with 0.25 sec interval
if isCriticalRequestRetriable(err) {
waitErr = wait.PollImmediate(shortPollDuration, shortPollTimeout, func() (bool, error) {
pod, err = kubeClient.GetPod(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
return pod != nil, err
})
// retries exhausted, return the error
if waitErr != nil {
return nil, cmdErr(k8sArgs, "error getting pod with error: %v", err)
}
} else {
// otherwise, return the error immediately
return nil, cmdErr(k8sArgs, "error getting pod: %v", err)
}
}
}
// resourceMap holds Pod device allocation information; only initialized if the CRD contains the 'resourceName' annotation.
// This will only be initialized once and all delegate objects can reference this to look up device info.
var resourceMap map[string]*types.ResourceInfo
if n.ClusterNetwork != "" {
resourceMap, err = k8s.GetDefaultNetworks(pod, n, kubeClient, resourceMap)
if err != nil {
return nil, cmdErr(k8sArgs, "failed to get clusterNetwork/defaultNetworks: %v", err)
}
// First delegate is always the master plugin
n.Delegates[0].MasterPlugin = true
}
_, kc, err := k8s.TryLoadPodDelegates(pod, n, kubeClient, resourceMap)
if err != nil {
return nil, cmdErr(k8sArgs, "error loading k8s delegates k8s args: %v", err)
}
// cache the multus config
if err := saveDelegates(args.ContainerID, n.CNIDir, n.Delegates); err != nil {
return nil, cmdErr(k8sArgs, "error saving the delegates: %v", err)
}
var result, tmpResult cnitypes.Result
var netStatus []nettypes.NetworkStatus
cniArgs := os.Getenv("CNI_ARGS")
for idx, delegate := range n.Delegates {
ifName := getIfname(delegate, args.IfName, idx)
rt, cniDeviceInfoPath := types.CreateCNIRuntimeConf(args, k8sArgs, ifName, n.RuntimeConfig, delegate)
if cniDeviceInfoPath != "" && delegate.ResourceName != "" && delegate.DeviceID != "" {
err = nadutils.CopyDeviceInfoForCNIFromDP(cniDeviceInfoPath, delegate.ResourceName, delegate.DeviceID)
// Even if the filename is set, file may not be present. Ignore error,
// but log and in the future may need to filter on specific errors.
if err != nil {
logging.Debugf("cmdAdd: CopyDeviceInfoForCNIFromDP returned an error - err=%v", err)
}
}
tmpResult, err = delegateAdd(exec, kubeClient, pod, ifName, delegate, rt, n, cniArgs)
if err != nil {
// If the add failed, tear down all networks we already added
netName := delegate.Conf.Name
if netName == "" {
netName = delegate.ConfList.Name
}
// Ignore errors; DEL must be idempotent anyway
_ = delPlugins(exec, nil, args, k8sArgs, n.Delegates, idx, n.RuntimeConfig, n)
return nil, cmdPluginErr(k8sArgs, netName, "error adding container to network %q: %v", netName, err)
}
// Remove gateway from routing table if the gateway is not used
deletegateway := false
adddefaultgateway := false
if delegate.IsFilterGateway {
deletegateway = true
logging.Debugf("Marked interface %v for gateway deletion", ifName)
} else {
// Otherwise, determine if this interface now gets our default route.
// According to
// https://docs.google.com/document/d/1Ny03h6IDVy_e_vmElOqR7UdTPAG_RNydhVE1Kx54kFQ (4.1.2.1.9)
// the list can be empty; if it is, we'll assume the CNI's config for the default gateway holds,
// else we'll update the defaultgateway to the one specified.
if delegate.GatewayRequest != nil && delegate.GatewayRequest[0] != nil {
deletegateway = true
adddefaultgateway = true
logging.Debugf("Detected gateway override on interface %v to %v", ifName, delegate.GatewayRequest)
}
}
if deletegateway {
tmpResult, err = netutils.DeleteDefaultGW(args, ifName, &tmpResult)
if err != nil {
return nil, cmdErr(k8sArgs, "error deleting default gateway: %v", err)
}
}
// Here we'll set the default gateway
if adddefaultgateway {
tmpResult, err = netutils.SetDefaultGW(args, ifName, delegate.GatewayRequest, &tmpResult)
if err != nil {
return nil, cmdErr(k8sArgs, "error setting default gateway: %v", err)
}
}
// Master plugin result is always used if present
if delegate.MasterPlugin || result == nil {
result = tmpResult
}
// Read devInfo from CNIDeviceInfoFile if it exists so
// it can be copied to the NetworkStatus.
devinfo, err := getDelegateDeviceInfo(delegate, rt)
if err != nil {
// Even if the filename is set, file may not be present. Ignore error,
// but log and in the future may need to filter on specific errors.
logging.Debugf("cmdAdd: getDelegateDeviceInfo returned an error - err=%v", err)
}
// create the network status, but only if Multus has a kubeconfig
if n.Kubeconfig != "" && kc != nil {
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAME), n.SystemNamespaces) {
delegateNetStatus, err := nadutils.CreateNetworkStatus(tmpResult, delegate.Name, delegate.MasterPlugin, devinfo)
if err != nil {
return nil, cmdErr(k8sArgs, "error setting network status: %v", err)
}
netStatus = append(netStatus, *delegateNetStatus)
}
} else if devinfo != nil {
// Warn that devinfo exists but could not be added to the downward API
logging.Errorf("devinfo available, but no kubeConfig so NetworkStatus not modified.")
}
}
// set the network status annotation in the apiserver, but only if Multus has a kubeconfig
if n.Kubeconfig != "" && kc != nil {
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAME), n.SystemNamespaces) {
err = k8s.SetNetworkStatus(kubeClient, k8sArgs, netStatus, n)
if err != nil {
if strings.Contains(err.Error(), "failed to query the pod") {
return nil, cmdErr(k8sArgs, "error setting the networks status, pod was already deleted: %v", err)
}
return nil, cmdErr(k8sArgs, "error setting the networks status: %v", err)
}
}
}
return result, nil
}
//CmdCheck ...
func CmdCheck(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) error {
in, err := types.LoadNetConf(args.StdinData)
logging.Debugf("CmdCheck: %v, %v, %v", args, exec, kubeClient)
if err != nil {
return err
}
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return cmdErr(nil, "error getting k8s args: %v", err)
}
for idx, delegate := range in.Delegates {
ifName := getIfname(delegate, args.IfName, idx)
rt, _ := types.CreateCNIRuntimeConf(args, k8sArgs, ifName, in.RuntimeConfig, delegate)
err = delegateCheck(exec, ifName, delegate, rt, in)
if err != nil {
return err
}
}
return nil
}
//CmdDel ...
func CmdDel(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) error {
in, err := types.LoadNetConf(args.StdinData)
logging.Debugf("CmdDel: %v, %v, %v", args, exec, kubeClient)
if err != nil {
return err
}
netnsfound := true
netns, err := ns.GetNS(args.Netns)
if err != nil {
// The Cloud Orchestration Engine may remove the netns before DEL, or call DEL multiple times,
// so don't return an error if the device is already removed.
// https://github.com/kubernetes/kubernetes/issues/43014#issuecomment-287164444
_, ok := err.(ns.NSPathNotExistErr)
if ok {
netnsfound = false
logging.Debugf("CmdDel: WARNING netns may not exist, netns: %s, err: %s", args.Netns, err)
} else {
return cmdErr(nil, "failed to open netns %q: %v", netns, err)
}
}
if netns != nil {
defer netns.Close()
}
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return cmdErr(nil, "error getting k8s args: %v", err)
}
if in.ReadinessIndicatorFile != "" {
err := wait.PollImmediate(pollDuration, pollTimeout, func() (bool, error) {
_, err := os.Stat(in.ReadinessIndicatorFile)
return err == nil, nil
})
if err != nil {
return cmdErr(k8sArgs, "PollImmediate error waiting for ReadinessIndicatorFile (on del): %v", err)
}
}
kubeClient, err = k8s.GetK8sClient(in.Kubeconfig, kubeClient)
if err != nil {
return cmdErr(nil, "error getting k8s client: %v", err)
}
pod := (*v1.Pod)(nil)
if kubeClient != nil {
pod, err = kubeClient.GetPod(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
if err != nil {
var waitErr error
// in case of a retriable error, retry 10 times with 0.25 sec interval
if isCriticalRequestRetriable(err) {
waitErr = wait.PollImmediate(shortPollDuration, shortPollTimeout, func() (bool, error) {
pod, err = kubeClient.GetPod(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
return pod != nil, err
})
// retries exhausted, return the error
if waitErr != nil {
return cmdErr(k8sArgs, "error getting pod with error: %v", err)
}
} else if errors.IsNotFound(err) {
// If not found, proceed to remove interface with cache
pod = nil
} else {
// Other case, return error
return cmdErr(k8sArgs, "error getting pod: %v", err)
}
}
}
// Read the cache to get delegates json for the pod
netconfBytes, path, err := consumeScratchNetConf(args.ContainerID, in.CNIDir)
if err != nil {
// Fetch delegates again if the cache does not exist and the pod info can be read
if os.IsNotExist(err) && pod != nil {
if in.ClusterNetwork != "" {
_, err = k8s.GetDefaultNetworks(pod, in, kubeClient, nil)
if err != nil {
return cmdErr(k8sArgs, "failed to get clusterNetwork/defaultNetworks: %v", err)
}
// First delegate is always the master plugin
in.Delegates[0].MasterPlugin = true
}
// Get pod annotation and so on
_, _, err := k8s.TryLoadPodDelegates(pod, in, kubeClient, nil)
if err != nil {
if len(in.Delegates) == 0 {
// No delegate available so send error
return cmdErr(k8sArgs, "failed to get delegates: %v", err)
}
// clusterNetwork was already fetched above, so continue with the delete
logging.Errorf("Multus: failed to get delegates: %v, but continue to delete clusterNetwork", err)
}
} else {
// The options to continue with a delete have been exhausted (cachefile + API query didn't work)
// We cannot exit with an error as this may cause a sandbox to never get deleted.
logging.Errorf("Multus: failed to get the cached delegates file: %v, cannot properly delete", err)
return nil
}
} else {
defer os.Remove(path)
if err := json.Unmarshal(netconfBytes, &in.Delegates); err != nil {
return cmdErr(k8sArgs, "failed to load netconf: %v", err)
}
// check the plugins field and enable ConfListPlugin if it is present
for _, v := range in.Delegates {
if len(v.ConfList.Plugins) != 0 {
v.ConfListPlugin = true
}
}
// First delegate is always the master plugin
in.Delegates[0].MasterPlugin = true
}
// set CNIVersion in the delegate CNI config if it has none and the Multus conf has one.
for _, v := range in.Delegates {
if v.ConfListPlugin == true && v.ConfList.CNIVersion == "" && in.CNIVersion != "" {
v.ConfList.CNIVersion = in.CNIVersion
v.Bytes, err = json.Marshal(v.ConfList)
}
}
// unset the network status annotation in the apiserver, but only if Multus has a kubeconfig
if in.Kubeconfig != "" {
if netnsfound {
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAMESPACE), in.SystemNamespaces) {
err := k8s.SetNetworkStatus(kubeClient, k8sArgs, nil, in)
if err != nil {
// an error occurred, but continue with the delete
logging.Errorf("Multus: error unsetting the networks status: %v", err)
}
}
} else {
logging.Debugf("WARNING: Unset SetNetworkStatus skipped due to netns not found.")
}
}
return delPlugins(exec, pod, args, k8sArgs, in.Delegates, len(in.Delegates)-1, in.RuntimeConfig, in)
}
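For context, the gateway handling in cmdAdd above is driven by the per-network "default-route" request carried in the pod's network selection annotation. The sketch below is not part of this changeset; it only illustrates the JSON shape that ends up in DelegateNetConf.GatewayRequest, with field names mirroring the NetworkSelectionElement tags shown further down in pkg/types/types.go (the network name, interface and address are made up).

package main

import (
    "encoding/json"
    "fmt"
    "net"
)

// Local mirror of the relevant NetworkSelectionElement fields, so the sketch
// stands on its own.
type selectionElement struct {
    Name             string   `json:"name"`
    InterfaceRequest string   `json:"interface,omitempty"`
    GatewayRequest   []net.IP `json:"default-route,omitempty"`
}

func main() {
    // One element of the pod's network selection annotation (values made up).
    annotation := `{"name": "macvlan-net", "interface": "net1", "default-route": ["192.168.2.1"]}`

    var elem selectionElement
    if err := json.Unmarshal([]byte(annotation), &elem); err != nil {
        panic(err)
    }
    // cmdAdd treats a non-empty GatewayRequest as a gateway override: the existing
    // default route is removed and replaced with the requested one.
    fmt.Printf("network %q on %q overrides the default gateway to %v\n",
        elem.Name, elem.InterfaceRequest, elem.GatewayRequest)
}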

File diff suppressed because it is too large

pkg/netutils/netutils.go (new file)

@@ -0,0 +1,119 @@
// Copyright (c) 2019 Multus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package netutils
import (
"github.com/containernetworking/cni/pkg/skel"
cnitypes "github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/vishvananda/netlink"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
"net"
"strings"
)
// DeleteDefaultGW removes the default gateway from marked interfaces.
func DeleteDefaultGW(args *skel.CmdArgs, ifName string, res *cnitypes.Result) (*current.Result, error) {
result, err := current.NewResultFromResult(*res)
if err != nil {
return nil, logging.Errorf("DeleteDefaultGW: Error creating new from current CNI result: %v", err)
}
netns, err := ns.GetNS(args.Netns)
if err != nil {
return nil, logging.Errorf("DeleteDefaultGW: Error getting namespace %v", err)
}
defer netns.Close()
err = netns.Do(func(_ ns.NetNS) error {
var err error
link, _ := netlink.LinkByName(ifName)
routes, _ := netlink.RouteList(link, netlink.FAMILY_ALL)
for _, nlroute := range routes {
if nlroute.Dst == nil {
err = netlink.RouteDel(&nlroute)
}
}
return err
})
var newRoutes []*cnitypes.Route
for _, route := range result.Routes {
if mask, _ := route.Dst.Mask.Size(); mask != 0 {
newRoutes = append(newRoutes, route)
}
}
result.Routes = newRoutes
return result, err
}
// SetDefaultGW adds a default gateway on a specific interface
func SetDefaultGW(args *skel.CmdArgs, ifName string, gateways []net.IP, res *cnitypes.Result) (*current.Result, error) {
// Use the current CNI result...
result, err := current.NewResultFromResult(*res)
if err != nil {
return nil, logging.Errorf("SetDefaultGW: Error creating new CNI result from current: %v", err)
}
// This ensures we're acting within the net namespace for the pod.
netns, err := ns.GetNS(args.Netns)
if err != nil {
return nil, logging.Errorf("SetDefaultGW: Error getting namespace %v", err)
}
defer netns.Close()
var newResultDefaultRoutes []*cnitypes.Route
// Do this within the net namespace.
err = netns.Do(func(_ ns.NetNS) error {
var err error
// Pick up the link info as we need the index.
link, _ := netlink.LinkByName(ifName)
// Cycle through all the desired gateways.
for _, gw := range gateways {
// Create a new route (note: dst is nil by default)
logging.Debugf("SetDefaultGW: Adding default route on %v (index: %v) to %v", ifName, link.Attrs().Index, gw)
newDefaultRoute := netlink.Route{
LinkIndex: link.Attrs().Index,
Gw: gw,
}
// Build a new element for the results route
// Set a correct CIDR depending on IP type
_, dstipnet, _ := net.ParseCIDR("::0/0")
if strings.Count(gw.String(), ":") < 2 {
_, dstipnet, _ = net.ParseCIDR("0.0.0.0/0")
}
newResultDefaultRoutes = append(newResultDefaultRoutes, &cnitypes.Route{Dst: *dstipnet, GW: gw})
// Perform the creation of the default route....
err = netlink.RouteAdd(&newDefaultRoute)
if err != nil {
logging.Errorf("SetDefaultGW: Error adding route: %v", err)
}
}
return err
})
result.Routes = newResultDefaultRoutes
return result, err
}
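Both helpers above lean on two conventions worth calling out: a route whose destination mask has length zero is the default route (that is what DeleteDefaultGW filters out of result.Routes), and SetDefaultGW chooses the destination CIDR family by counting ':' characters in the gateway string. A small standalone sketch of just those two checks, using only the standard library:

package main

import (
    "fmt"
    "net"
    "strings"
)

func main() {
    // Mask length 0 marks a default route; anything else is kept in result.Routes.
    _, defRoute, _ := net.ParseCIDR("0.0.0.0/0")
    _, hostRoute, _ := net.ParseCIDR("10.0.0.0/24")
    defOnes, _ := defRoute.Mask.Size()
    hostOnes, _ := hostRoute.Mask.Size()
    fmt.Println("0.0.0.0/0 mask length:", defOnes)    // 0  -> dropped by DeleteDefaultGW
    fmt.Println("10.0.0.0/24 mask length:", hostOnes) // 24 -> kept

    // SetDefaultGW picks ::0/0 for IPv6 gateways and 0.0.0.0/0 otherwise.
    for _, gw := range []string{"192.168.1.1", "fd00::1"} {
        dst := "::0/0"
        if strings.Count(gw, ":") < 2 {
            dst = "0.0.0.0/0"
        }
        fmt.Printf("gateway %s -> result route dst %s\n", gw, dst)
    }
}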


@@ -24,123 +24,56 @@ import (
"os"
"strings"
netv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/containernetworking/cni/pkg/types"
types020 "github.com/containernetworking/cni/pkg/types/020"
"github.com/onsi/gomega"
)
// FakeKubeClient is stub KubeClient for testing
type FakeKubeClient struct {
pods map[string]*v1.Pod
PodCount int
nets map[string]string
NetCount int
}
// NewFakeKubeClient creates FakeKubeClient for testing
func NewFakeKubeClient() *FakeKubeClient {
return &FakeKubeClient{
pods: make(map[string]*v1.Pod),
nets: make(map[string]string),
// NewFakeNetAttachDef returns net-attach-def for testing
func NewFakeNetAttachDef(namespace, name, config string) *netv1.NetworkAttachmentDefinition {
return &netv1.NetworkAttachmentDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: netv1.NetworkAttachmentDefinitionSpec{
Config: config,
},
}
}
// GetRawWithPath returns k8s raw data from its path
func (f *FakeKubeClient) GetRawWithPath(path string) ([]byte, error) {
obj, ok := f.nets[path]
if !ok {
return nil, fmt.Errorf("resource not found")
// NewFakeNetAttachDefFile returns net-attach-def for testing with conf file
func NewFakeNetAttachDefFile(namespace, name, filePath, fileData string) *netv1.NetworkAttachmentDefinition {
netAttach := &netv1.NetworkAttachmentDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
}
f.NetCount++
return []byte(obj), nil
}
// AddNetConfig adds net-attach-def into its client
func (f *FakeKubeClient) AddNetConfig(namespace, name, data string) {
cr := fmt.Sprintf(`{
"apiVersion": "k8s.cni.cncf.io/v1",
"kind": "Network",
"metadata": {
"namespace": "%s",
"name": "%s"
},
"spec": {
"config": "%s"
}
}`, namespace, name, strings.Replace(data, "\"", "\\\"", -1))
cr = strings.Replace(cr, "\n", "", -1)
cr = strings.Replace(cr, "\t", "", -1)
f.nets[fmt.Sprintf("/apis/k8s.cni.cncf.io/v1/namespaces/%s/network-attachment-definitions/%s", namespace, name)] = cr
}
// AddNetConfigAnnotation adds net-attach-def into its client with an annotation
func (f *FakeKubeClient) AddNetConfigAnnotation(namespace, name, data string) {
cr := fmt.Sprintf(`{
"apiVersion": "k8s.cni.cncf.io/v1",
"kind": "Network",
"metadata": {
"namespace": "%s",
"name": "%s",
"annotations": {
"k8s.v1.cni.cncf.io/resourceName": "intel.com/sriov"
}
},
"spec": {
"config": "%s"
}
}`, namespace, name, strings.Replace(data, "\"", "\\\"", -1))
cr = strings.Replace(cr, "\n", "", -1)
cr = strings.Replace(cr, "\t", "", -1)
f.nets[fmt.Sprintf("/apis/k8s.cni.cncf.io/v1/namespaces/%s/network-attachment-definitions/%s", namespace, name)] = cr
}
// AddNetFile puts config file as net-attach-def
func (f *FakeKubeClient) AddNetFile(namespace, name, filePath, fileData string) {
cr := fmt.Sprintf(`{
"apiVersion": "k8s.cni.cncf.io/v1",
"kind": "Network",
"metadata": {
"namespace": "%s",
"name": "%s"
}
}`, namespace, name)
f.nets[fmt.Sprintf("/apis/k8s.cni.cncf.io/v1/namespaces/%s/network-attachment-definitions/%s", namespace, name)] = cr
err := ioutil.WriteFile(filePath, []byte(fileData), 0600)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return netAttach
}
// GetPod query pod by namespace/pod and return it if exists
func (f *FakeKubeClient) GetPod(namespace, name string) (*v1.Pod, error) {
key := fmt.Sprintf("%s/%s", namespace, name)
pod, ok := f.pods[key]
if !ok {
return nil, fmt.Errorf("pod not found")
// NewFakeNetAttachDefAnnotation returns net-attach-def with resource annotation
func NewFakeNetAttachDefAnnotation(namespace, name, config string) *netv1.NetworkAttachmentDefinition {
return &netv1.NetworkAttachmentDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{
"k8s.v1.cni.cncf.io/resourceName": "intel.com/sriov",
},
},
Spec: netv1.NetworkAttachmentDefinitionSpec{
Config: config,
},
}
f.PodCount++
return pod, nil
}
// UpdatePodStatus update pod status
func (f *FakeKubeClient) UpdatePodStatus(pod *v1.Pod) (*v1.Pod, error) {
key := fmt.Sprintf("%s/%s", pod.Namespace, pod.Name)
f.pods[key] = pod
return f.pods[key], nil
}
// AddPod adds pod into fake client
func (f *FakeKubeClient) AddPod(pod *v1.Pod) {
key := fmt.Sprintf("%s/%s", pod.ObjectMeta.Namespace, pod.ObjectMeta.Name)
f.pods[key] = pod
}
// DeletePod remove pod from fake client
func (f *FakeKubeClient) DeletePod(pod *v1.Pod) {
key := fmt.Sprintf("%s/%s", pod.ObjectMeta.Namespace, pod.ObjectMeta.Name)
delete(f.pods, key)
}
// NewFakePod creates fake Pod object
@@ -149,6 +82,7 @@ func NewFakePod(name string, netAnnotation string, defaultNetAnnotation string)
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "test",
UID: "testUID",
},
Spec: v1.PodSpec{
Containers: []v1.Container{

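The replacement helpers above return real NetworkAttachmentDefinition and Pod objects instead of the raw JSON strings the old FakeKubeClient stored. A hedged usage sketch follows; it assumes the gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/testing import path used elsewhere in this changeset and that NewFakePod returns a *v1.Pod, and all names in it are made up.

package main

import (
    "fmt"

    testhelpers "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/testing"
)

func main() {
    // Build a net-attach-def object directly instead of going through the old
    // FakeKubeClient.AddNetConfig JSON path.
    nad := testhelpers.NewFakeNetAttachDef("test", "net1",
        `{"cniVersion": "0.3.1", "type": "macvlan", "master": "eth0"}`)
    fmt.Println(nad.Namespace, nad.Name, nad.Spec.Config)

    // NewFakePod attaches the given network annotation to a pod in namespace "test".
    pod := testhelpers.NewFakePod("testpod", "net1", "")
    fmt.Println(pod.Name, pod.Annotations)
}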
pkg/types/conf.go (new file)

@@ -0,0 +1,527 @@
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package types
import (
"encoding/json"
"fmt"
"net"
"strings"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/skel"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/cni/pkg/version"
nadutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils"
"gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/logging"
)
const (
defaultCNIDir = "/var/lib/cni/multus"
defaultConfDir = "/etc/cni/multus/net.d"
defaultBinDir = "/opt/cni/bin"
defaultReadinessIndicatorFile = ""
defaultMultusNamespace = "kube-system"
defaultNonIsolatedNamespace = "default"
)
// LoadDelegateNetConfList reads DelegateNetConf from bytes
func LoadDelegateNetConfList(bytes []byte, delegateConf *DelegateNetConf) error {
logging.Debugf("LoadDelegateNetConfList: %s, %v", string(bytes), delegateConf)
if err := json.Unmarshal(bytes, &delegateConf.ConfList); err != nil {
return logging.Errorf("LoadDelegateNetConfList: error unmarshalling delegate conflist: %v", err)
}
if delegateConf.ConfList.Plugins == nil {
return logging.Errorf("LoadDelegateNetConfList: delegate must have the 'type' or 'plugin' field")
}
if delegateConf.ConfList.Plugins[0].Type == "" {
return logging.Errorf("LoadDelegateNetConfList: a plugin delegate must have the 'type' field")
}
delegateConf.ConfListPlugin = true
delegateConf.Name = delegateConf.ConfList.Name
return nil
}
// LoadDelegateNetConf converts raw CNI JSON into a DelegateNetConf structure
func LoadDelegateNetConf(bytes []byte, net *NetworkSelectionElement, deviceID string, resourceName string) (*DelegateNetConf, error) {
var err error
logging.Debugf("LoadDelegateNetConf: %s, %v, %s", string(bytes), net, deviceID)
delegateConf := &DelegateNetConf{}
if err := json.Unmarshal(bytes, &delegateConf.Conf); err != nil {
return nil, logging.Errorf("LoadDelegateNetConf: error unmarshalling delegate config: %v", err)
}
delegateConf.Name = delegateConf.Conf.Name
// Do some minimal validation
if delegateConf.Conf.Type == "" {
if err := LoadDelegateNetConfList(bytes, delegateConf); err != nil {
return nil, logging.Errorf("LoadDelegateNetConf: failed with: %v", err)
}
if deviceID != "" {
bytes, err = addDeviceIDInConfList(bytes, deviceID)
if err != nil {
return nil, logging.Errorf("LoadDelegateNetConf: failed to add deviceID in NetConfList bytes: %v", err)
}
}
if net != nil && net.CNIArgs != nil {
bytes, err = addCNIArgsInConfList(bytes, net.CNIArgs)
if err != nil {
return nil, logging.Errorf("LoadDelegateNetConf(): failed to add cni-args in NetConfList bytes: %v", err)
}
}
} else {
if deviceID != "" {
bytes, err = delegateAddDeviceID(bytes, deviceID)
if err != nil {
return nil, logging.Errorf("LoadDelegateNetConf: failed to add deviceID in NetConf bytes: %v", err)
}
// Save them for housekeeping
delegateConf.ResourceName = resourceName
delegateConf.DeviceID = deviceID
}
if net != nil && net.CNIArgs != nil {
bytes, err = addCNIArgsInConfig(bytes, net.CNIArgs)
if err != nil {
return nil, logging.Errorf("LoadDelegateNetConf(): failed to add cni-args in NetConfList bytes: %v", err)
}
}
}
if net != nil {
if net.Name != "" {
// Overwrite CNI config name with net-attach-def name
delegateConf.Name = fmt.Sprintf("%s/%s", net.Namespace, net.Name)
}
if net.InterfaceRequest != "" {
delegateConf.IfnameRequest = net.InterfaceRequest
}
if net.MacRequest != "" {
delegateConf.MacRequest = net.MacRequest
}
if net.IPRequest != nil {
delegateConf.IPRequest = net.IPRequest
}
if net.BandwidthRequest != nil {
delegateConf.BandwidthRequest = net.BandwidthRequest
}
if net.PortMappingsRequest != nil {
delegateConf.PortMappingsRequest = net.PortMappingsRequest
}
if net.GatewayRequest != nil {
delegateConf.GatewayRequest = append(delegateConf.GatewayRequest, net.GatewayRequest...)
}
if net.InfinibandGUIDRequest != "" {
delegateConf.InfinibandGUIDRequest = net.InfinibandGUIDRequest
}
if net.DeviceID != "" {
if deviceID != "" {
logging.Debugf("Warning: Both RuntimeConfig and ResourceMap provide deviceID. Ignoring RuntimeConfig")
} else {
delegateConf.DeviceID = net.DeviceID
}
}
}
delegateConf.Bytes = bytes
return delegateConf, nil
}
// mergeCNIRuntimeConfig creates CNI runtimeconfig from delegate
func mergeCNIRuntimeConfig(runtimeConfig *RuntimeConfig, delegate *DelegateNetConf) *RuntimeConfig {
logging.Debugf("mergeCNIRuntimeConfig: %v %v", runtimeConfig, delegate)
var mergedRuntimeConfig RuntimeConfig
if runtimeConfig == nil {
mergedRuntimeConfig = RuntimeConfig{}
} else {
mergedRuntimeConfig = *runtimeConfig
}
// multus injects RuntimeConfig only for non-MasterPlugin delegates.
if delegate.MasterPlugin != true {
logging.Debugf("mergeCNIRuntimeConfig: add runtimeConfig for net-attach-def: %v", mergedRuntimeConfig)
if delegate.PortMappingsRequest != nil {
mergedRuntimeConfig.PortMaps = delegate.PortMappingsRequest
}
if delegate.BandwidthRequest != nil {
mergedRuntimeConfig.Bandwidth = delegate.BandwidthRequest
}
if delegate.IPRequest != nil {
mergedRuntimeConfig.IPs = delegate.IPRequest
}
if delegate.MacRequest != "" {
mergedRuntimeConfig.Mac = delegate.MacRequest
}
if delegate.InfinibandGUIDRequest != "" {
mergedRuntimeConfig.InfinibandGUID = delegate.InfinibandGUIDRequest
}
if delegate.DeviceID != "" {
mergedRuntimeConfig.DeviceID = delegate.DeviceID
}
logging.Debugf("mergeCNIRuntimeConfig: add runtimeConfig for net-attach-def: %v", mergedRuntimeConfig)
}
return &mergedRuntimeConfig
}
// CreateCNIRuntimeConf creates a CNI RuntimeConf for a delegate. If delegate configuration
// exists, merge data with the runtime config.
func CreateCNIRuntimeConf(args *skel.CmdArgs, k8sArgs *K8sArgs, ifName string, rc *RuntimeConfig, delegate *DelegateNetConf) (*libcni.RuntimeConf, string) {
logging.Debugf("LoadCNIRuntimeConf: %v, %v, %s, %v %v", args, k8sArgs, ifName, rc, delegate)
var cniDeviceInfoFile string
var delegateRc *RuntimeConfig
if delegate != nil {
delegateRc = mergeCNIRuntimeConfig(rc, delegate)
if delegateRc.DeviceID != "" {
if delegateRc.CNIDeviceInfoFile != "" {
logging.Debugf("Warning: Existing value of CNIDeviceInfoFile will be overwritten %s", delegateRc.CNIDeviceInfoFile)
}
autoDeviceInfo := fmt.Sprintf("%s-%s_%s", delegate.Name, args.ContainerID, ifName)
delegateRc.CNIDeviceInfoFile = nadutils.GetCNIDeviceInfoPath(autoDeviceInfo)
cniDeviceInfoFile = delegateRc.CNIDeviceInfoFile
logging.Debugf("Adding auto-generated CNIDeviceInfoFile: %s", delegateRc.CNIDeviceInfoFile)
}
} else {
delegateRc = rc
}
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go#buildCNIRuntimeConf
// TODO: support ingress, egress and bandwidth capability features, the same as kubelet.
rt := &libcni.RuntimeConf{
ContainerID: args.ContainerID,
NetNS: args.Netns,
IfName: ifName,
// NOTE: Verbose logging depends on this order, so please keep Args order.
Args: [][2]string{
{"IgnoreUnknown", string("true")},
{"K8S_POD_NAMESPACE", string(k8sArgs.K8S_POD_NAMESPACE)},
{"K8S_POD_NAME", string(k8sArgs.K8S_POD_NAME)},
{"K8S_POD_INFRA_CONTAINER_ID", string(k8sArgs.K8S_POD_INFRA_CONTAINER_ID)},
},
}
if delegateRc != nil {
capabilityArgs := map[string]interface{}{}
if len(delegateRc.PortMaps) != 0 {
capabilityArgs["portMappings"] = delegateRc.PortMaps
}
if delegateRc.Bandwidth != nil {
capabilityArgs["bandwidth"] = delegateRc.Bandwidth
}
if len(delegateRc.IPs) != 0 {
capabilityArgs["ips"] = delegateRc.IPs
}
if len(delegateRc.Mac) != 0 {
capabilityArgs["mac"] = delegateRc.Mac
}
if len(delegateRc.InfinibandGUID) != 0 {
capabilityArgs["infinibandGUID"] = delegateRc.InfinibandGUID
}
if delegateRc.DeviceID != "" {
capabilityArgs["deviceID"] = delegateRc.DeviceID
}
if delegateRc.CNIDeviceInfoFile != "" {
capabilityArgs["CNIDeviceInfoFile"] = delegateRc.CNIDeviceInfoFile
}
rt.CapabilityArgs = capabilityArgs
}
return rt, cniDeviceInfoFile
}
// GetGatewayFromResult retrieves gateway IP addresses from CNI result
func GetGatewayFromResult(result *current.Result) []net.IP {
var gateways []net.IP
for _, route := range result.Routes {
if mask, _ := route.Dst.Mask.Size(); mask == 0 {
gateways = append(gateways, route.GW)
}
}
return gateways
}
// LoadNetConf converts inputs (i.e. stdin) to NetConf
func LoadNetConf(bytes []byte) (*NetConf, error) {
// LogToStderr's default value set to true
netconf := &NetConf{LogToStderr: true}
logging.Debugf("LoadNetConf: %s", string(bytes))
if err := json.Unmarshal(bytes, netconf); err != nil {
return nil, logging.Errorf("LoadNetConf: failed to load netconf: %v", err)
}
// Logging
logging.SetLogStderr(netconf.LogToStderr)
if netconf.LogFile != "" {
logging.SetLogFile(netconf.LogFile)
}
if netconf.LogLevel != "" {
logging.SetLogLevel(netconf.LogLevel)
}
// Parse previous result
if netconf.RawPrevResult != nil {
resultBytes, err := json.Marshal(netconf.RawPrevResult)
if err != nil {
return nil, logging.Errorf("LoadNetConf: could not serialize prevResult: %v", err)
}
res, err := version.NewResult(netconf.CNIVersion, resultBytes)
if err != nil {
return nil, logging.Errorf("LoadNetConf: could not parse prevResult: %v", err)
}
netconf.RawPrevResult = nil
netconf.PrevResult, err = current.NewResultFromResult(res)
if err != nil {
return nil, logging.Errorf("LoadNetConf: could not convert result to current version: %v", err)
}
}
// Delegates must always be set. If no kubeconfig is present, the
// delegates are executed in-order. If a kubeconfig is present,
// at least one delegate must be present and the first delegate is
// the master plugin. Kubernetes CRD delegates are then appended to
// the existing delegate list and all delegates executed in-order.
if len(netconf.RawDelegates) == 0 && netconf.ClusterNetwork == "" {
return nil, logging.Errorf("LoadNetConf: at least one delegate/defaultNetwork must be specified")
}
if netconf.CNIDir == "" {
netconf.CNIDir = defaultCNIDir
}
if netconf.ConfDir == "" {
netconf.ConfDir = defaultConfDir
}
if netconf.BinDir == "" {
netconf.BinDir = defaultBinDir
}
if netconf.ReadinessIndicatorFile == "" {
netconf.ReadinessIndicatorFile = defaultReadinessIndicatorFile
}
if len(netconf.SystemNamespaces) == 0 {
netconf.SystemNamespaces = []string{"kube-system"}
}
if netconf.MultusNamespace == "" {
netconf.MultusNamespace = defaultMultusNamespace
}
// setup namespace isolation
if netconf.RawNonIsolatedNamespaces == "" {
netconf.NonIsolatedNamespaces = []string{defaultNonIsolatedNamespace}
} else {
// Parse the comma separated list
nonisolated := strings.Split(netconf.RawNonIsolatedNamespaces, ",")
// Cleanup the whitespace
for i, nonv := range nonisolated {
nonisolated[i] = strings.TrimSpace(nonv)
}
netconf.NonIsolatedNamespaces = nonisolated
}
// parse RawDelegates and populate the Delegates field
if netconf.ClusterNetwork == "" {
// for Delegates
if len(netconf.RawDelegates) == 0 {
return nil, logging.Errorf("LoadNetConf: at least one delegate must be specified")
}
for idx, rawConf := range netconf.RawDelegates {
bytes, err := json.Marshal(rawConf)
if err != nil {
return nil, logging.Errorf("LoadNetConf: error marshalling delegate %d config: %v", idx, err)
}
delegateConf, err := LoadDelegateNetConf(bytes, nil, "", "")
if err != nil {
return nil, logging.Errorf("LoadNetConf: failed to load delegate %d config: %v", idx, err)
}
netconf.Delegates = append(netconf.Delegates, delegateConf)
}
netconf.RawDelegates = nil
// First delegate is always the master plugin
netconf.Delegates[0].MasterPlugin = true
}
return netconf, nil
}
// AddDelegates appends the new delegates to the delegates list
func (n *NetConf) AddDelegates(newDelegates []*DelegateNetConf) error {
logging.Debugf("AddDelegates: %v", newDelegates)
n.Delegates = append(n.Delegates, newDelegates...)
return nil
}
// delegateAddDeviceID injects deviceID information in delegate bytes
func delegateAddDeviceID(inBytes []byte, deviceID string) ([]byte, error) {
var rawConfig map[string]interface{}
var err error
err = json.Unmarshal(inBytes, &rawConfig)
if err != nil {
return nil, logging.Errorf("delegateAddDeviceID: failed to unmarshal inBytes: %v", err)
}
// Inject deviceID
rawConfig["deviceID"] = deviceID
rawConfig["pciBusID"] = deviceID
configBytes, err := json.Marshal(rawConfig)
if err != nil {
return nil, logging.Errorf("delegateAddDeviceID: failed to re-marshal Spec.Config: %v", err)
}
logging.Debugf("delegateAddDeviceID updated configBytes %s", string(configBytes))
return configBytes, nil
}
// addDeviceIDInConfList injects deviceID information in delegate bytes
func addDeviceIDInConfList(inBytes []byte, deviceID string) ([]byte, error) {
var rawConfig map[string]interface{}
var err error
err = json.Unmarshal(inBytes, &rawConfig)
if err != nil {
return nil, logging.Errorf("addDeviceIDInConfList: failed to unmarshal inBytes: %v", err)
}
pList, ok := rawConfig["plugins"]
if !ok {
return nil, logging.Errorf("addDeviceIDInConfList: unable to get plugin list")
}
pMap, ok := pList.([]interface{})
if !ok {
return nil, logging.Errorf("addDeviceIDInConfList: unable to typecast plugin list")
}
for idx, plugin := range pMap {
currentPlugin, ok := plugin.(map[string]interface{})
if !ok {
return nil, logging.Errorf("addDeviceIDInConfList: unable to typecast plugin #%d", idx)
}
// Inject deviceID
currentPlugin["deviceID"] = deviceID
currentPlugin["pciBusID"] = deviceID
}
configBytes, err := json.Marshal(rawConfig)
if err != nil {
return nil, logging.Errorf("addDeviceIDInConfList: failed to re-marshal: %v", err)
}
logging.Debugf("addDeviceIDInConfList: updated configBytes %s", string(configBytes))
return configBytes, nil
}
// injectCNIArgs injects given args to cniConfig
func injectCNIArgs(cniConfig *map[string]interface{}, args *map[string]interface{}) error {
if argsval, ok := (*cniConfig)["args"]; ok {
argsvalmap := argsval.(map[string]interface{})
if cnival, ok := argsvalmap["cni"]; ok {
cnivalmap := cnival.(map[string]interface{})
// merge it if conf has args
for key, val := range *args {
cnivalmap[key] = val
}
} else {
argsvalmap["cni"] = *args
}
} else {
argsval := map[string]interface{}{}
argsval["cni"] = *args
(*cniConfig)["args"] = argsval
}
return nil
}
// addCNIArgsInConfig injects given cniArgs to CNI config in inBytes
func addCNIArgsInConfig(inBytes []byte, cniArgs *map[string]interface{}) ([]byte, error) {
var rawConfig map[string]interface{}
var err error
err = json.Unmarshal(inBytes, &rawConfig)
if err != nil {
return nil, logging.Errorf("addCNIArgsInConfig(): failed to unmarshal inBytes: %v", err)
}
injectCNIArgs(&rawConfig, cniArgs)
configBytes, err := json.Marshal(rawConfig)
if err != nil {
return nil, logging.Errorf("addCNIArgsInConfig(): failed to re-marshal: %v", err)
}
return configBytes, nil
}
// addCNIArgsInConfList injects given cniArgs to CNI conflist in inBytes
func addCNIArgsInConfList(inBytes []byte, cniArgs *map[string]interface{}) ([]byte, error) {
var rawConfig map[string]interface{}
var err error
err = json.Unmarshal(inBytes, &rawConfig)
if err != nil {
return nil, logging.Errorf("addCNIArgsInConfList(): failed to unmarshal inBytes: %v", err)
}
pList, ok := rawConfig["plugins"]
if !ok {
return nil, logging.Errorf("addCNIArgsInConfList(): unable to get plugin list")
}
pMap, ok := pList.([]interface{})
if !ok {
return nil, logging.Errorf("addCNIArgsInConfList(): unable to typecast plugin list")
}
for idx := range pMap {
valMap := pMap[idx].(map[string]interface{})
injectCNIArgs(&valMap, cniArgs)
}
configBytes, err := json.Marshal(rawConfig)
if err != nil {
return nil, logging.Errorf("addCNIArgsInConfList(): failed to re-marshal: %v", err)
}
return configBytes, nil
}
// CheckGatewayConfig checks gatewayRequest and marks the IsFilterGateway flag if
// gateway filtering is required
func CheckGatewayConfig(delegates []*DelegateNetConf) {
// Check the Gateway
for i, delegate := range delegates {
if delegate.GatewayRequest == nil {
delegates[i].IsFilterGateway = true
}
}
}
// CheckSystemNamespaces checks whether given namespace is in systemNamespaces or not.
func CheckSystemNamespaces(namespace string, systemNamespaces []string) bool {
for _, nsname := range systemNamespaces {
if namespace == nsname {
return true
}
}
return false
}
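To make the defaults above concrete, here is a minimal sketch, assuming the gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types import path used by this changeset, of what LoadNetConf fills in for a config that only lists delegates (the names are made up):

package main

import (
    "fmt"

    "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
)

func main() {
    conf := `{
        "name": "multus-demo",
        "type": "multus",
        "delegates": [{
            "cniVersion": "0.3.1",
            "name": "default-net",
            "type": "bridge"
        }]
    }`
    n, err := types.LoadNetConf([]byte(conf))
    if err != nil {
        panic(err)
    }
    // Unset fields fall back to the package defaults declared at the top of conf.go.
    fmt.Println(n.CNIDir, n.ConfDir, n.BinDir)         // /var/lib/cni/multus /etc/cni/multus/net.d /opt/cni/bin
    fmt.Println(n.SystemNamespaces, n.MultusNamespace) // [kube-system] kube-system
    fmt.Println(n.NonIsolatedNamespaces)               // [default]
    // The first (and here only) delegate becomes the master plugin.
    fmt.Println(n.Delegates[0].Name, n.Delegates[0].MasterPlugin) // default-net true
}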


@@ -26,7 +26,8 @@ import (
types020 "github.com/containernetworking/cni/pkg/types/020"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containernetworking/plugins/pkg/testutils"
testhelpers "github.com/intel/multus-cni/testing"
netutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils"
testhelpers "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -101,7 +102,7 @@ var _ = Describe("config operations", func() {
_, err := LoadNetConf([]byte(conf))
Expect(err).To(HaveOccurred())
_, err = LoadDelegateNetConf([]byte(conf), nil, "")
_, err = LoadDelegateNetConf([]byte(conf), nil, "", "")
Expect(err).To(HaveOccurred())
err = LoadDelegateNetConfList([]byte(conf), &DelegateNetConf{})
Expect(err).To(HaveOccurred())
@@ -134,6 +135,47 @@ var _ = Describe("config operations", func() {
Expect(netConf.LogFile).To(Equal("/var/log/multus.log"))
})
It("properly sets namespace isolation using the default namespace", func() {
conf := `{
"name": "node-cni-network",
"type": "multus",
"logLevel": "debug",
"logFile": "/var/log/multus.log",
"kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
"namespaceIsolation": true,
"delegates": [{
"type": "weave-net"
}]
}`
netConf, err := LoadNetConf([]byte(conf))
Expect(err).NotTo(HaveOccurred())
Expect(netConf.NamespaceIsolation).To(Equal(true))
Expect(len(netConf.NonIsolatedNamespaces)).To(Equal(1))
Expect(netConf.NonIsolatedNamespaces[0]).To(Equal("default"))
})
It("properly sets namespace isolation using custom namespaces", func() {
conf := `{
"name": "node-cni-network",
"type": "multus",
"logLevel": "debug",
"logFile": "/var/log/multus.log",
"kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
"namespaceIsolation": true,
"globalNamespaces": " foo,bar ,default",
"delegates": [{
"type": "weave-net"
}]
}`
netConf, err := LoadNetConf([]byte(conf))
Expect(err).NotTo(HaveOccurred())
Expect(netConf.NamespaceIsolation).To(Equal(true))
Expect(len(netConf.NonIsolatedNamespaces)).To(Equal(3))
Expect(netConf.NonIsolatedNamespaces[0]).To(Equal("foo"))
Expect(netConf.NonIsolatedNamespaces[1]).To(Equal("bar"))
Expect(netConf.NonIsolatedNamespaces[2]).To(Equal("default"))
})
It("prevResult with no errors", func() {
conf := `{
"name": "node-cni-network",
@@ -279,7 +321,7 @@ var _ = Describe("config operations", func() {
DeviceID string `json:"deviceID"`
}
sriovConf := &sriovNetConf{}
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.0")
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.0", "")
Expect(err).NotTo(HaveOccurred())
err = json.Unmarshal(delegateNetConf.Bytes, &sriovConf)
@@ -305,7 +347,7 @@ var _ = Describe("config operations", func() {
Plugins []*sriovNetConf `json:"plugins"`
}
sriovConfList := &sriovNetConfList{}
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.1")
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.1", "")
Expect(err).NotTo(HaveOccurred())
err = json.Unmarshal(delegateNetConf.Bytes, &sriovConfList)
@@ -313,6 +355,37 @@ var _ = Describe("config operations", func() {
Expect(sriovConfList.Plugins[0].DeviceID).To(Equal("0000:00:00.1"))
})
It("assigns deviceID in delegated conf list multiple plugins", func() {
conf := `{
"name": "second-network",
"plugins": [
{
"type": "sriov"
},
{
"type": "other-cni"
}
]
}`
type sriovNetConf struct {
Name string `json:"name"`
Type string `json:"type"`
DeviceID string `json:"deviceID"`
}
type sriovNetConfList struct {
Plugins []*sriovNetConf `json:"plugins"`
}
sriovConfList := &sriovNetConfList{}
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.1", "")
Expect(err).NotTo(HaveOccurred())
err = json.Unmarshal(delegateNetConf.Bytes, &sriovConfList)
Expect(err).NotTo(HaveOccurred())
for _, plugin := range sriovConfList.Plugins {
Expect(plugin.DeviceID).To(Equal("0000:00:00.1"))
}
})
It("assigns pciBusID in delegated conf", func() {
conf := `{
"name": "second-network",
@@ -324,7 +397,7 @@ var _ = Describe("config operations", func() {
PCIBusID string `json:"pciBusID"`
}
hostDeviceConf := &hostDeviceNetConf{}
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.2")
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.2", "")
Expect(err).NotTo(HaveOccurred())
err = json.Unmarshal(delegateNetConf.Bytes, &hostDeviceConf)
@@ -350,7 +423,7 @@ var _ = Describe("config operations", func() {
Plugins []*hostDeviceNetConf `json:"plugins"`
}
hostDeviceConfList := &hostDeviceNetConfList{}
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.3")
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.3", "")
Expect(err).NotTo(HaveOccurred())
err = json.Unmarshal(delegateNetConf.Bytes, &hostDeviceConfList)
@@ -358,6 +431,141 @@ var _ = Describe("config operations", func() {
Expect(hostDeviceConfList.Plugins[0].PCIBusID).To(Equal("0000:00:00.3"))
})
It("assigns pciBusID in delegated conf list multiple plugins", func() {
conf := `{
"name": "second-network",
"plugins": [
{
"type": "host-device"
},
{
"type": "other-cni"
}
]
}`
type hostDeviceNetConf struct {
Name string `json:"name"`
Type string `json:"type"`
PCIBusID string `json:"pciBusID"`
}
type hostDeviceNetConfList struct {
Plugins []*hostDeviceNetConf `json:"plugins"`
}
hostDeviceConfList := &hostDeviceNetConfList{}
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.3", "")
Expect(err).NotTo(HaveOccurred())
err = json.Unmarshal(delegateNetConf.Bytes, &hostDeviceConfList)
Expect(err).NotTo(HaveOccurred())
for _, plugin := range hostDeviceConfList.Plugins {
Expect(plugin.PCIBusID).To(Equal("0000:00:00.3"))
}
})
It("add cni-args in config", func() {
var args map[string]interface{}
conf := `{
"name": "second-network",
"type": "bridge"
}`
cniArgs := `{
"args1": "val1"
}`
type bridgeNetConf struct {
Name string `json:"name"`
Type string `json:"type"`
Args struct {
CNI map[string]string `json:"cni"`
} `json:"args"`
}
err := json.Unmarshal([]byte(cniArgs), &args)
Expect(err).NotTo(HaveOccurred())
net := &NetworkSelectionElement{
Name: "test-elem",
CNIArgs: &args,
}
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), net, "", "")
Expect(err).NotTo(HaveOccurred())
bridgeConf := &bridgeNetConf{}
err = json.Unmarshal(delegateNetConf.Bytes, bridgeConf)
Expect(bridgeConf.Args.CNI["args1"]).To(Equal("val1"))
})
It("add cni-args in config which has cni args already (merge case)", func() {
var args map[string]interface{}
conf := `{
"name": "second-network",
"type": "bridge",
"args": {
"cni": {
"args0": "val0",
"args1": "val1"
}
}
}`
cniArgs := `{
"args1": "val1a"
}`
type bridgeNetConf struct {
Name string `json:"name"`
Type string `json:"type"`
Args struct {
CNI map[string]string `json:"cni"`
} `json:"args"`
}
err := json.Unmarshal([]byte(cniArgs), &args)
Expect(err).NotTo(HaveOccurred())
net := &NetworkSelectionElement{
Name: "test-elem",
CNIArgs: &args,
}
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), net, "", "")
Expect(err).NotTo(HaveOccurred())
bridgeConf := &bridgeNetConf{}
err = json.Unmarshal(delegateNetConf.Bytes, bridgeConf)
Expect(bridgeConf.Args.CNI["args0"]).To(Equal("val0"))
Expect(bridgeConf.Args.CNI["args1"]).To(Equal("val1a"))
})
It("add cni-args in conflist", func() {
var args map[string]interface{}
conf := `{
"name": "second-network",
"plugins": [
{
"type": "bridge"
}
]
}`
cniArgs := `{
"args1": "val1"
}`
type bridgeNetConf struct {
Type string `json:"type"`
Args struct {
CNI map[string]string `json:"cni"`
} `json:"args"`
}
type bridgeNetConfList struct {
Name string `json:"name"`
Plugins []*bridgeNetConf `json:"plugins"`
}
err := json.Unmarshal([]byte(cniArgs), &args)
Expect(err).NotTo(HaveOccurred())
net := &NetworkSelectionElement{
Name: "test-elem",
CNIArgs: &args,
}
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), net, "", "")
Expect(err).NotTo(HaveOccurred())
bridgeConflist := &bridgeNetConfList{}
err = json.Unmarshal(delegateNetConf.Bytes, bridgeConflist)
Expect(bridgeConflist.Plugins[0].Args.CNI["args1"]).To(Equal("val1"))
})
It("creates a valid CNI runtime config", func() {
args := &skel.CmdArgs{
ContainerID: "123456789",
@@ -383,19 +591,22 @@ var _ = Describe("config operations", func() {
k8sArgs := &K8sArgs{K8S_POD_NAME: "dummy", K8S_POD_NAMESPACE: "namespacedummy", K8S_POD_INFRA_CONTAINER_ID: "123456789"}
rc := &RuntimeConfig{}
tempportmap := make([]PortMapEntry, 2)
rc.PortMaps = tempportmap
rc.PortMaps = make([]*PortMapEntry, 2)
rc.PortMaps[0].HostPort = 0
rc.PortMaps[0].ContainerPort = 1
rc.PortMaps[0].Protocol = "sampleProtocol"
rc.PortMaps[0].HostIP = "sampleHostIP"
rc.PortMaps[1].HostPort = 1
rc.PortMaps[1].ContainerPort = 2
rc.PortMaps[1].Protocol = "anotherSampleProtocol"
rc.PortMaps[1].HostIP = "anotherSampleHostIP"
rc.PortMaps[0] = &PortMapEntry{
HostPort: 0,
ContainerPort: 1,
Protocol: "sampleProtocol",
HostIP: "sampleHostIP",
}
rc.PortMaps[1] = &PortMapEntry{
HostPort: 1,
ContainerPort: 2,
Protocol: "anotherSampleProtocol",
HostIP: "anotherSampleHostIP",
}
rt := CreateCNIRuntimeConf(args, k8sArgs, "", rc)
rt, _ := CreateCNIRuntimeConf(args, k8sArgs, "", rc, nil)
fmt.Println("rt.ContainerID: ", rt.ContainerID)
Expect(rt.ContainerID).To(Equal("123456789"))
Expect(rt.NetNS).To(Equal(args.Netns))
@@ -425,10 +636,10 @@ var _ = Describe("config operations", func() {
}
}`
delegate, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.0")
delegate, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.0", "")
Expect(err).NotTo(HaveOccurred())
delegateNetStatus, err := LoadNetworkStatus(result, delegate.Conf.Name, delegate.MasterPlugin)
delegateNetStatus, err := netutils.CreateNetworkStatus(result, delegate.Conf.Name, delegate.MasterPlugin, nil)
GinkgoT().Logf("delegateNetStatus %+v\n", delegateNetStatus)
@@ -458,13 +669,180 @@ var _ = Describe("config operations", func() {
}
}`
delegate, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.0")
delegate, err := LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.0", "")
Expect(err).NotTo(HaveOccurred())
fmt.Println("result.Version: ", result.Version())
delegateNetStatus, err := LoadNetworkStatus(result, delegate.Conf.Name, delegate.MasterPlugin)
delegateNetStatus, err := netutils.CreateNetworkStatus(result, delegate.Conf.Name, delegate.MasterPlugin, nil)
GinkgoT().Logf("delegateNetStatus %+v\n", delegateNetStatus)
Expect(err).To(HaveOccurred())
})
It("verify the network selection elements goes into delegateconf", func() {
cniConfig := `{
"name": "weave1",
"cniVersion": "0.2.0",
"type": "weave-net"
}`
bandwidthEntry1 := &BandwidthEntry{
IngressRate: 100,
IngressBurst: 200,
EgressRate: 100,
EgressBurst: 200,
}
portMapEntry1 := &PortMapEntry{
HostPort: 8080,
ContainerPort: 80,
Protocol: "tcp",
HostIP: "10.0.0.1",
}
networkSelection := &NetworkSelectionElement{
Name: "testname",
InterfaceRequest: "testIF1",
MacRequest: "c2:11:22:33:44:66",
InfinibandGUIDRequest: "24:8a:07:03:00:8d:ae:2e",
IPRequest: []string{"10.0.0.1/24"},
BandwidthRequest: bandwidthEntry1,
PortMappingsRequest: []*PortMapEntry{portMapEntry1},
}
delegateConf, err := LoadDelegateNetConf([]byte(cniConfig), networkSelection, "", "")
Expect(err).NotTo(HaveOccurred())
Expect(delegateConf.IfnameRequest).To(Equal(networkSelection.InterfaceRequest))
Expect(delegateConf.MacRequest).To(Equal(networkSelection.MacRequest))
Expect(delegateConf.InfinibandGUIDRequest).To(Equal(networkSelection.InfinibandGUIDRequest))
Expect(delegateConf.IPRequest).To(Equal(networkSelection.IPRequest))
Expect(delegateConf.BandwidthRequest).To(Equal(networkSelection.BandwidthRequest))
Expect(delegateConf.PortMappingsRequest).To(Equal(networkSelection.PortMappingsRequest))
})
It("test mergeCNIRuntimeConfig with masterPlugin", func() {
conf := `{
"name": "node-cni-network",
"type": "multus",
"kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
"delegates": [{
"name": "weave",
"type": "weave-net"
}],
"runtimeConfig": {
"portMappings": [
{"hostPort": 8080, "containerPort": 80, "protocol": "tcp"}
]
}
}`
bandwidthEntry1 := &BandwidthEntry{
IngressRate: 100,
IngressBurst: 200,
EgressRate: 100,
EgressBurst: 200,
}
portMapEntry1 := &PortMapEntry{
HostPort: 8080,
ContainerPort: 80,
Protocol: "tcp",
HostIP: "10.0.0.1",
}
networkSelection := &NetworkSelectionElement{
Name: "testname",
InterfaceRequest: "testIF1",
MacRequest: "c2:11:22:33:44:66",
InfinibandGUIDRequest: "24:8a:07:03:00:8d:ae:2e",
IPRequest: []string{"10.0.0.1/24"},
BandwidthRequest: bandwidthEntry1,
PortMappingsRequest: []*PortMapEntry{portMapEntry1},
}
delegate, err := LoadDelegateNetConf([]byte(conf), networkSelection, "", "")
delegate.MasterPlugin = true
origRuntimeConfig := RuntimeConfig{}
Expect(err).NotTo(HaveOccurred())
runtimeConf := mergeCNIRuntimeConfig(&origRuntimeConfig, delegate)
Expect(runtimeConf.PortMaps).To(BeNil())
Expect(runtimeConf.Bandwidth).To(BeNil())
Expect(runtimeConf.InfinibandGUID).To(Equal(""))
// The original RuntimeConfig must have not been overwritten
Expect(origRuntimeConfig).To(Equal(RuntimeConfig{}))
})
It("test mergeCNIRuntimeConfig with delegate plugin", func() {
conf := `{
"name": "weave1",
"cniVersion": "0.2.0",
"type": "weave-net"
}`
bandwidthEntry1 := &BandwidthEntry{
IngressRate: 100,
IngressBurst: 200,
EgressRate: 100,
EgressBurst: 200,
}
portMapEntry1 := &PortMapEntry{
HostPort: 8080,
ContainerPort: 80,
Protocol: "tcp",
HostIP: "10.0.0.1",
}
networkSelection := &NetworkSelectionElement{
Name: "testname",
InterfaceRequest: "testIF1",
MacRequest: "c2:11:22:33:44:66",
InfinibandGUIDRequest: "24:8a:07:03:00:8d:ae:2e",
IPRequest: []string{"10.0.0.1/24"},
BandwidthRequest: bandwidthEntry1,
PortMappingsRequest: []*PortMapEntry{portMapEntry1},
}
origRuntimeConfig := RuntimeConfig{}
delegate, err := LoadDelegateNetConf([]byte(conf), networkSelection, "", "")
Expect(err).NotTo(HaveOccurred())
runtimeConf := mergeCNIRuntimeConfig(&origRuntimeConfig, delegate)
Expect(runtimeConf.PortMaps).NotTo(BeNil())
Expect(len(runtimeConf.PortMaps)).To(BeEquivalentTo(1))
Expect(runtimeConf.PortMaps[0]).To(Equal(portMapEntry1))
Expect(runtimeConf.Bandwidth).To(Equal(bandwidthEntry1))
Expect(len(runtimeConf.IPs)).To(BeEquivalentTo(1))
Expect(runtimeConf.Mac).To(Equal("c2:11:22:33:44:66"))
Expect(runtimeConf.InfinibandGUID).To(Equal("24:8a:07:03:00:8d:ae:2e"))
// The original RuntimeConfig must have not been overwritten
Expect(origRuntimeConfig).To(Equal(RuntimeConfig{}))
})
It("test DelegateConf Name is delivered", func() {
conf := `{
"name": "node-cni-network",
"type": "multus",
"kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
"delegates": [{
"name": "weave",
"type": "weave-net"
}]
}`
n, err := LoadNetConf([]byte(conf))
Expect(err).NotTo(HaveOccurred())
Expect(len(n.Delegates)).To(BeEquivalentTo(1))
Expect(n.Delegates[0].Name).To(Equal("weave"))
})
It("test DelegateConfList Name is delivered", func() {
conf := `{
"name": "node-cni-network",
"type": "multus",
"kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
"delegates": [{
"name": "weave-list",
"plugins": [ {"type" :"weave"} ]
}]
}`
n, err := LoadNetConf([]byte(conf))
Expect(err).NotTo(HaveOccurred())
Expect(len(n.Delegates)).To(BeEquivalentTo(1))
Expect(n.Delegates[0].Name).To(Equal("weave-list"))
})
})
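The deviceID cases above go through LoadDelegateNetConf; for reference, a short sketch of the same injection outside the test suite (same module-path assumption as before, with a placeholder PCI address):

package main

import (
    "fmt"

    "gopkg.in/k8snetworkplumbingwg/multus-cni.v3/pkg/types"
)

func main() {
    conf := `{"name": "sriov-net", "type": "sriov"}`
    // "0000:00:00.1" is a placeholder PCI address; the resource name mirrors the
    // annotation value used in the fixtures above.
    delegate, err := types.LoadDelegateNetConf([]byte(conf), nil, "0000:00:00.1", "intel.com/sriov")
    if err != nil {
        panic(err)
    }
    // Both deviceID and pciBusID end up injected into the raw delegate bytes.
    fmt.Println(string(delegate.Bytes))
    fmt.Println(delegate.ResourceName, delegate.DeviceID)
}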


@@ -21,7 +21,6 @@ import (
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NetConf for cni config file written in json
@@ -39,17 +38,20 @@ type NetConf struct {
// RawDelegates is private to the NetConf class; use Delegates instead
RawDelegates []map[string]interface{} `json:"delegates"`
Delegates []*DelegateNetConf `json:"-"`
NetStatus []*NetworkStatus `json:"-"`
Kubeconfig string `json:"kubeconfig"`
ClusterNetwork string `json:"clusterNetwork"`
DefaultNetworks []string `json:"defaultNetworks"`
LogFile string `json:"logFile"`
LogLevel string `json:"logLevel"`
LogToStderr bool `json:"logToStderr,omitempty"`
RuntimeConfig *RuntimeConfig `json:"runtimeConfig,omitempty"`
// Default network readiness options
ReadinessIndicatorFile string `json:"readinessindicatorfile"`
// Option to isolate the usage of CR's to the namespace in which a pod resides.
NamespaceIsolation bool `json:"namespaceIsolation"`
NamespaceIsolation bool `json:"namespaceIsolation"`
RawNonIsolatedNamespaces string `json:"globalNamespaces"`
NonIsolatedNamespaces []string `json:"-"`
// Option to set system namespaces (to avoid to add defaultNetworks)
SystemNamespaces []string `json:"systemNamespaces"`
// Option to set the namespace that multus-cni uses (clusterNetwork/defaultNetworks)
@@ -58,70 +60,58 @@ type NetConf struct {
// RuntimeConfig specifies CNI RuntimeConfig
type RuntimeConfig struct {
PortMaps []PortMapEntry `json:"portMappings,omitempty"`
PortMaps []*PortMapEntry `json:"portMappings,omitempty"`
Bandwidth *BandwidthEntry `json:"bandwidth,omitempty"`
IPs []string `json:"ips,omitempty"`
Mac string `json:"mac,omitempty"`
InfinibandGUID string `json:"infinibandGUID,omitempty"`
DeviceID string `json:"deviceID,omitempty"`
CNIDeviceInfoFile string `json:"CNIDeviceInfoFile,omitempty"`
}
// PortMapEntry for CNI PortMapEntry
type PortMapEntry struct {
HostPort int `json:"hostPort"`
ContainerPort int `json:"containerPort"`
Protocol string `json:"protocol"`
Protocol string `json:"protocol,omitempty"`
HostIP string `json:"hostIP,omitempty"`
}
// NetworkStatus is for network status annotation for pod
type NetworkStatus struct {
Name string `json:"name"`
Interface string `json:"interface,omitempty"`
IPs []string `json:"ips,omitempty"`
Mac string `json:"mac,omitempty"`
Default bool `json:"default,omitempty"`
DNS types.DNS `json:"dns,omitempty"`
// BandwidthEntry for CNI BandwidthEntry
type BandwidthEntry struct {
IngressRate int `json:"ingressRate"`
IngressBurst int `json:"ingressBurst"`
EgressRate int `json:"egressRate"`
EgressBurst int `json:"egressBurst"`
}
// DelegateNetConf for net-attach-def for pod
type DelegateNetConf struct {
Conf types.NetConf
ConfList types.NetConfList
IfnameRequest string `json:"ifnameRequest,omitempty"`
MacRequest string `json:"macRequest,omitempty"`
IPRequest string `json:"ipRequest,omitempty"`
Conf types.NetConf
ConfList types.NetConfList
Name string
IfnameRequest string `json:"ifnameRequest,omitempty"`
MacRequest string `json:"macRequest,omitempty"`
InfinibandGUIDRequest string `json:"infinibandGUIDRequest,omitempty"`
IPRequest []string `json:"ipRequest,omitempty"`
PortMappingsRequest []*PortMapEntry `json:"-"`
BandwidthRequest *BandwidthEntry `json:"-"`
GatewayRequest []net.IP `json:"default-route,omitempty"`
IsFilterGateway bool
// MasterPlugin is only used for internal housekeeping
MasterPlugin bool `json:"-"`
// ConfListPlugin is only used for internal housekeeping
ConfListPlugin bool `json:"-"`
// DeviceID is only used for internal housekeeping
DeviceID string `json:"deviceID,omitempty"`
// ResourceName is only used for internal housekeeping
ResourceName string `json:"resourceName,omitempty"`
// Raw JSON
Bytes []byte
}
// NetworkAttachmentDefinition represents net-attach-def of K8s NPWG spec
type NetworkAttachmentDefinition struct {
metav1.TypeMeta `json:",inline"`
// Note that ObjectMeta is mandatory, as an object
// name is required
Metadata metav1.ObjectMeta `json:"metadata,omitempty" description:"standard object metadata"`
// Specification describing how to invoke a CNI plugin to
// add or remove network attachments for a Pod.
// In the absence of valid keys in a Spec, the runtime (or
// meta-plugin) should load and execute a CNI .configlist
// or .config (in that order) file on-disk whose JSON
// “name” key matches this Network objects name.
// +optional
Spec NetworkAttachmentDefinitionSpec `json:"spec"`
}
// NetworkAttachmentDefinitionSpec represents net-attach-def spec of K8s NPWG spec
type NetworkAttachmentDefinitionSpec struct {
// Config contains a standard JSON-encoded CNI configuration
// or configuration list which defines the plugin chain to
// execute. If present, this key takes precedence over
// Plugin.
// +optional
Config string `json:"config"`
}
// NetworkSelectionElement represents one element of the JSON format
// Network Attachment Selection Annotation as described in section 4.1.2
// of the CRD specification.
@@ -133,25 +123,40 @@ type NetworkSelectionElement struct {
Namespace string `json:"namespace,omitempty"`
// IPRequest contains an optional requested IP address for this network
// attachment
IPRequest string `json:"ips,omitempty"`
IPRequest []string `json:"ips,omitempty"`
// MacRequest contains an optional requested MAC address for this
// network attachment
MacRequest string `json:"mac,omitempty"`
// InfinibandGUID request contains an optional requested Infiniband GUID address
// for this network attachment
InfinibandGUIDRequest string `json:"infiniband-guid,omitempty"`
// InterfaceRequest contains an optional requested name for the
// network interface this attachment will create in the container
InterfaceRequest string `json:"interface,omitempty"`
// ObsoateInterfaceRequest is obsolated parameter at pre 3.2.
// DeprecatedInterfaceRequest is a deprecated parameter from pre-3.2.
// This will be removed in 4.0 release.
ObsolatedInterfaceRequest string `json:"interfaceRequest,omitempty"`
DeprecatedInterfaceRequest string `json:"interfaceRequest,omitempty"`
// PortMappingsRequest contains an optional requested port mapping
// for the network
PortMappingsRequest []*PortMapEntry `json:"portMappings,omitempty"`
// BandwidthRequest contains an optional requested bandwidth for
// the network
BandwidthRequest *BandwidthEntry `json:"bandwidth,omitempty"`
// DeviceID contains an optional requested deviceID the network
DeviceID string `json:"deviceID,omitempty"`
// CNIArgs contains additional CNI arguments for the network interface
CNIArgs *map[string]interface{} `json:"cni-args"`
// GatewayRequest contains default route IP address for the pod
GatewayRequest []net.IP `json:"default-route,omitempty"`
}
// K8sArgs is the valid CNI_ARGS used for Kubernetes
type K8sArgs struct {
types.CommonArgs
IP net.IP
K8S_POD_NAME types.UnmarshallableString
K8S_POD_NAMESPACE types.UnmarshallableString
K8S_POD_INFRA_CONTAINER_ID types.UnmarshallableString
K8S_POD_NAME types.UnmarshallableString //revive:disable-line
K8S_POD_NAMESPACE types.UnmarshallableString //revive:disable-line
K8S_POD_INFRA_CONTAINER_ID types.UnmarshallableString //revive:disable-line
}
// ResourceInfo is struct to hold Pod device allocation information

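For context, K8sArgs above is normally populated from the semicolon-separated CNI_ARGS string via the CNI library's LoadArgs, which is how helpers like GetK8sArgs typically fill it in on the multus side. A self-contained sketch (the struct is mirrored locally so the example stands alone; the pod name and container ID are made up):

package main

import (
    "fmt"
    "net"

    cnitypes "github.com/containernetworking/cni/pkg/types"
)

// Local mirror of the K8sArgs struct shown above.
type K8sArgs struct {
    cnitypes.CommonArgs
    IP                         net.IP
    K8S_POD_NAME               cnitypes.UnmarshallableString
    K8S_POD_NAMESPACE          cnitypes.UnmarshallableString
    K8S_POD_INFRA_CONTAINER_ID cnitypes.UnmarshallableString
}

func main() {
    // The semicolon-separated form is what the container runtime passes in CNI_ARGS.
    args := "IgnoreUnknown=true;K8S_POD_NAMESPACE=default;K8S_POD_NAME=samplepod;K8S_POD_INFRA_CONTAINER_ID=1234567890"

    k8sArgs := &K8sArgs{}
    if err := cnitypes.LoadArgs(args, k8sArgs); err != nil {
        panic(err)
    }
    fmt.Println(k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_NAME)
}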

@@ -1,332 +0,0 @@
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package types
import (
"encoding/json"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/skel"
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/cni/pkg/version"
"github.com/intel/multus-cni/logging"
)
const (
defaultCNIDir = "/var/lib/cni/multus"
defaultConfDir = "/etc/cni/multus/net.d"
defaultBinDir = "/opt/cni/bin"
defaultReadinessIndicatorFile = ""
defaultMultusNamespace = "kube-system"
)
// LoadDelegateNetConfList reads DelegateNetConf from bytes
func LoadDelegateNetConfList(bytes []byte, delegateConf *DelegateNetConf) error {
logging.Debugf("LoadDelegateNetConfList: %s, %v", string(bytes), delegateConf)
if err := json.Unmarshal(bytes, &delegateConf.ConfList); err != nil {
return logging.Errorf("err in unmarshalling delegate conflist: %v", err)
}
if delegateConf.ConfList.Plugins == nil {
return logging.Errorf("delegate must have the 'type'or 'Plugin' field")
}
if delegateConf.ConfList.Plugins[0].Type == "" {
return logging.Errorf("a plugin delegate must have the 'type' field")
}
delegateConf.ConfListPlugin = true
return nil
}
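A usage sketch for the conflist path, assuming the exported package path used by this tree (github.com/intel/multus-cni/types); the network definition is illustrative.
package main

import (
	"fmt"

	"github.com/intel/multus-cni/types"
)

func main() {
	// A conflist-style delegate: "plugins" must exist and the first entry
	// needs a non-empty "type", per the validation above.
	conflist := []byte(`{
	  "cniVersion": "0.3.1",
	  "name": "example-net",
	  "plugins": [
	    {"type": "macvlan", "master": "eth0", "ipam": {"type": "dhcp"}}
	  ]
	}`)
	delegate := &types.DelegateNetConf{}
	if err := types.LoadDelegateNetConfList(conflist, delegate); err != nil {
		panic(err)
	}
	fmt.Println("loaded conflist delegate:", delegate.ConfListPlugin)
}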
// LoadDelegateNetConf converts raw CNI JSON into a DelegateNetConf structure
func LoadDelegateNetConf(bytes []byte, net *NetworkSelectionElement, deviceID string) (*DelegateNetConf, error) {
var err error
logging.Debugf("LoadDelegateNetConf: %s, %v, %s", string(bytes), net, deviceID)
delegateConf := &DelegateNetConf{}
if err := json.Unmarshal(bytes, &delegateConf.Conf); err != nil {
return nil, logging.Errorf("error in LoadDelegateNetConf - unmarshalling delegate config: %v", err)
}
// Do some minimal validation
if delegateConf.Conf.Type == "" {
if err := LoadDelegateNetConfList(bytes, delegateConf); err != nil {
return nil, logging.Errorf("error in LoadDelegateNetConf: %v", err)
}
if deviceID != "" {
bytes, err = addDeviceIDInConfList(bytes, deviceID)
if err != nil {
return nil, logging.Errorf("LoadDelegateNetConf(): failed to add deviceID in NetConfList bytes: %v", err)
}
}
} else {
if deviceID != "" {
bytes, err = delegateAddDeviceID(bytes, deviceID)
if err != nil {
return nil, logging.Errorf("LoadDelegateNetConf(): failed to add deviceID in NetConf bytes: %v", err)
}
}
}
if net != nil {
if net.InterfaceRequest != "" {
delegateConf.IfnameRequest = net.InterfaceRequest
}
if net.MacRequest != "" {
delegateConf.MacRequest = net.MacRequest
}
if net.IPRequest != "" {
delegateConf.IPRequest = net.IPRequest
}
}
delegateConf.Bytes = bytes
return delegateConf, nil
}
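And the single-plugin path, again a hedged sketch with the same assumed import path, showing how a NetworkSelectionElement's interface and MAC requests are carried onto the delegate.
package main

import (
	"fmt"

	"github.com/intel/multus-cni/types"
)

func main() {
	// Illustrative single-plugin delegate config.
	conf := []byte(`{"cniVersion": "0.3.1", "name": "macvlan-net", "type": "macvlan", "master": "eth0"}`)
	sel := &types.NetworkSelectionElement{
		Name:             "macvlan-net",
		InterfaceRequest: "net1",
		MacRequest:       "c2:b0:57:49:47:f1",
	}
	delegate, err := types.LoadDelegateNetConf(conf, sel, "")
	if err != nil {
		panic(err)
	}
	// IfnameRequest and MacRequest now reflect the selection element.
	fmt.Println(delegate.IfnameRequest, delegate.MacRequest)
}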
// CreateCNIRuntimeConf creates a CNI RuntimeConf
func CreateCNIRuntimeConf(args *skel.CmdArgs, k8sArgs *K8sArgs, ifName string, rc *RuntimeConfig) *libcni.RuntimeConf {
logging.Debugf("LoadCNIRuntimeConf: %v, %v, %s, %v", args, k8sArgs, ifName, rc)
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go#buildCNIRuntimeConf
// TODO: support the ingress, egress and bandwidth capability features,
// the same as kubelet does.
rt := &libcni.RuntimeConf{
ContainerID: args.ContainerID,
NetNS: args.Netns,
IfName: ifName,
Args: [][2]string{
{"IgnoreUnknown", "1"},
{"K8S_POD_NAMESPACE", string(k8sArgs.K8S_POD_NAMESPACE)},
{"K8S_POD_NAME", string(k8sArgs.K8S_POD_NAME)},
{"K8S_POD_INFRA_CONTAINER_ID", string(k8sArgs.K8S_POD_INFRA_CONTAINER_ID)},
},
}
if rc != nil {
rt.CapabilityArgs = map[string]interface{}{
"portMappings": rc.PortMaps,
}
}
return rt
}
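A hedged sketch of calling CreateCNIRuntimeConf with illustrative arguments; rc is left nil here, so no portMappings capability args are set. The multus types import path is assumed as above.
package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/skel"
	cnitypes "github.com/containernetworking/cni/pkg/types"
	"github.com/intel/multus-cni/types"
)

func main() {
	args := &skel.CmdArgs{
		ContainerID: "abcdef123456",
		Netns:       "/var/run/netns/example",
	}
	k8sArgs := &types.K8sArgs{
		K8S_POD_NAME:               cnitypes.UnmarshallableString("samplepod"),
		K8S_POD_NAMESPACE:          cnitypes.UnmarshallableString("default"),
		K8S_POD_INFRA_CONTAINER_ID: cnitypes.UnmarshallableString("abcdef123456"),
	}
	rt := types.CreateCNIRuntimeConf(args, k8sArgs, "net1", nil)
	fmt.Println(rt.IfName, rt.Args)
}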
// LoadNetworkStatus creates a network status from a CNI result
func LoadNetworkStatus(r types.Result, netName string, defaultNet bool) (*NetworkStatus, error) {
logging.Debugf("LoadNetworkStatus: %v, %s, %t", r, netName, defaultNet)
netstatus := &NetworkStatus{}
netstatus.Name = netName
netstatus.Default = defaultNet
// Convert whatever the IPAM result was into the current Result type
result, err := current.NewResultFromResult(r)
if err != nil {
logging.Errorf("error convert the type.Result to current.Result: %v", err)
return netstatus, err
}
for _, ifs := range result.Interfaces {
// Only pod interfaces can have sandbox information
if ifs.Sandbox != "" {
netstatus.Interface = ifs.Name
netstatus.Mac = ifs.Mac
}
}
for _, ipconfig := range result.IPs {
if ipconfig.Version == "4" && ipconfig.Address.IP.To4() != nil {
netstatus.IPs = append(netstatus.IPs, ipconfig.Address.IP.String())
}
if ipconfig.Version == "6" && ipconfig.Address.IP.To16() != nil {
netstatus.IPs = append(netstatus.IPs, ipconfig.Address.IP.String())
}
}
netstatus.DNS = result.DNS
return netstatus, nil
}
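A hedged sketch of turning a current.Result into a NetworkStatus; the interface data and addresses are made up, and the multus types import path is assumed.
package main

import (
	"fmt"
	"net"

	"github.com/containernetworking/cni/pkg/types/current"
	"github.com/intel/multus-cni/types"
)

func main() {
	result := &current.Result{
		CNIVersion: "0.3.1",
		Interfaces: []*current.Interface{
			{Name: "net1", Mac: "c2:b0:57:49:47:f1", Sandbox: "/var/run/netns/example"},
		},
		IPs: []*current.IPConfig{
			{Version: "4", Address: net.IPNet{IP: net.ParseIP("10.1.1.11"), Mask: net.CIDRMask(24, 32)}},
		},
	}
	status, err := types.LoadNetworkStatus(result, "macvlan-net", false)
	if err != nil {
		panic(err)
	}
	fmt.Println(status.Interface, status.Mac, status.IPs)
}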
// LoadNetConf converts inputs (i.e. stdin) to NetConf
func LoadNetConf(bytes []byte) (*NetConf, error) {
netconf := &NetConf{}
logging.Debugf("LoadNetConf: %s", string(bytes))
if err := json.Unmarshal(bytes, netconf); err != nil {
return nil, logging.Errorf("failed to load netconf: %v", err)
}
// Logging
if netconf.LogFile != "" {
logging.SetLogFile(netconf.LogFile)
}
if netconf.LogLevel != "" {
logging.SetLogLevel(netconf.LogLevel)
}
// Parse previous result
if netconf.RawPrevResult != nil {
resultBytes, err := json.Marshal(netconf.RawPrevResult)
if err != nil {
return nil, logging.Errorf("could not serialize prevResult: %v", err)
}
res, err := version.NewResult(netconf.CNIVersion, resultBytes)
if err != nil {
return nil, logging.Errorf("could not parse prevResult: %v", err)
}
netconf.RawPrevResult = nil
netconf.PrevResult, err = current.NewResultFromResult(res)
if err != nil {
return nil, logging.Errorf("could not convert result to current version: %v", err)
}
}
// Delegates must always be set. If no kubeconfig is present, the
// delegates are executed in-order. If a kubeconfig is present,
// at least one delegate must be present and the first delegate is
// the master plugin. Kubernetes CRD delegates are then appended to
// the existing delegate list and all delegates executed in-order.
if len(netconf.RawDelegates) == 0 && netconf.ClusterNetwork == "" {
return nil, logging.Errorf("at least one delegate/defaultNetwork must be specified")
}
if netconf.CNIDir == "" {
netconf.CNIDir = defaultCNIDir
}
if netconf.ConfDir == "" {
netconf.ConfDir = defaultConfDir
}
if netconf.BinDir == "" {
netconf.BinDir = defaultBinDir
}
if netconf.ReadinessIndicatorFile == "" {
netconf.ReadinessIndicatorFile = defaultReadinessIndicatorFile
}
if len(netconf.SystemNamespaces) == 0 {
netconf.SystemNamespaces = []string{"kube-system"}
}
if netconf.MultusNamespace == "" {
netconf.MultusNamespace = defaultMultusNamespace
}
// Parse RawDelegates and populate the Delegates field
if netconf.ClusterNetwork == "" {
// for Delegates
if len(netconf.RawDelegates) == 0 {
return nil, logging.Errorf("at least one delegate must be specified")
}
for idx, rawConf := range netconf.RawDelegates {
bytes, err := json.Marshal(rawConf)
if err != nil {
return nil, logging.Errorf("error marshalling delegate %d config: %v", idx, err)
}
delegateConf, err := LoadDelegateNetConf(bytes, nil, "")
if err != nil {
return nil, logging.Errorf("failed to load delegate %d config: %v", idx, err)
}
netconf.Delegates = append(netconf.Delegates, delegateConf)
}
netconf.RawDelegates = nil
// First delegate is always the master plugin
netconf.Delegates[0].MasterPlugin = true
}
return netconf, nil
}
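A hedged sketch of the kind of stdin configuration LoadNetConf accepts; the kubeconfig path and delegate are placeholders, and defaults such as binDir and confDir are filled in as shown above.
package main

import (
	"fmt"

	"github.com/intel/multus-cni/types"
)

func main() {
	stdin := []byte(`{
	  "cniVersion": "0.3.1",
	  "name": "multus-cni-network",
	  "type": "multus",
	  "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig",
	  "delegates": [
	    {"cniVersion": "0.3.1", "name": "default-net", "type": "flannel"}
	  ]
	}`)
	netconf, err := types.LoadNetConf(stdin)
	if err != nil {
		panic(err)
	}
	// The first delegate is flagged as the master plugin; defaults are applied.
	fmt.Println(netconf.Delegates[0].MasterPlugin, netconf.BinDir, netconf.ConfDir)
}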
// AddDelegates appends the new delegates to the delegates list
func (n *NetConf) AddDelegates(newDelegates []*DelegateNetConf) error {
logging.Debugf("AddDelegates: %v", newDelegates)
n.Delegates = append(n.Delegates, newDelegates...)
return nil
}
// delegateAddDeviceID injects deviceID information in delegate bytes
func delegateAddDeviceID(inBytes []byte, deviceID string) ([]byte, error) {
var rawConfig map[string]interface{}
var err error
err = json.Unmarshal(inBytes, &rawConfig)
if err != nil {
return nil, logging.Errorf("delegateAddDeviceID: failed to unmarshal inBytes: %v", err)
}
// Inject deviceID
rawConfig["deviceID"] = deviceID
rawConfig["pciBusID"] = deviceID
configBytes, err := json.Marshal(rawConfig)
if err != nil {
return nil, logging.Errorf("delegateAddDeviceID: failed to re-marshal Spec.Config: %v", err)
}
logging.Debugf("delegateAddDeviceID(): updated configBytes %s", string(configBytes))
return configBytes, nil
}
// addDeviceIDInConfList injects deviceID information in delegate bytes
func addDeviceIDInConfList(inBytes []byte, deviceID string) ([]byte, error) {
var rawConfig map[string]interface{}
var err error
err = json.Unmarshal(inBytes, &rawConfig)
if err != nil {
return nil, logging.Errorf("addDeviceIDInConfList(): failed to unmarshal inBytes: %v", err)
}
pList, ok := rawConfig["plugins"]
if !ok {
return nil, logging.Errorf("addDeviceIDInConfList(): unable to get plugin list")
}
pMap, ok := pList.([]interface{})
if !ok {
return nil, logging.Errorf("addDeviceIDInConfList(): unable to typecast plugin list")
}
firstPlugin, ok := pMap[0].(map[string]interface{})
if !ok {
return nil, logging.Errorf("addDeviceIDInConfList(): unable to typecast pMap")
}
// Inject deviceID
firstPlugin["deviceID"] = deviceID
firstPlugin["pciBusID"] = deviceID
configBytes, err := json.Marshal(rawConfig)
if err != nil {
return nil, logging.Errorf("addDeviceIDInConfList(): failed to re-marshal: %v", err)
}
logging.Debugf("addDeviceIDInConfList(): updated configBytes %s", string(configBytes))
return configBytes, nil
}
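These two helpers are unexported; callers reach them through LoadDelegateNetConf by passing a non-empty deviceID (typically a PCI address handed out by a device plugin). A hedged sketch, with the PCI address and delegate purely illustrative:
package main

import (
	"fmt"

	"github.com/intel/multus-cni/types"
)

func main() {
	// Illustrative SR-IOV-style delegate and PCI address.
	conf := []byte(`{"cniVersion": "0.3.1", "name": "sriov-net", "type": "sriov"}`)
	delegate, err := types.LoadDelegateNetConf(conf, nil, "0000:af:06.0")
	if err != nil {
		panic(err)
	}
	// delegate.Bytes now carries "deviceID" and "pciBusID" set to the PCI address.
	fmt.Println(string(delegate.Bytes))
}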
// CheckSystemNamespaces checks whether the given namespace is in systemNamespaces or not.
func CheckSystemNamespaces(namespace string, systemNamespaces []string) bool {
for _, nsname := range systemNamespaces {
if namespace == nsname {
return true
}
}
return false
}
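A short usage sketch (import path assumed as before):
package main

import (
	"fmt"

	"github.com/intel/multus-cni/types"
)

func main() {
	fmt.Println(types.CheckSystemNamespaces("kube-system", []string{"kube-system", "openshift-sdn"})) // true
	fmt.Println(types.CheckSystemNamespaces("default", []string{"kube-system"}))                      // false
}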

Some files were not shown because too many files have changed in this diff.