13 Commits

Author SHA1 Message Date
OpenShift Merge Robot
812441ce17 Merge pull request #67 from s1061123/dev/add_verbose_info
Bug 1835033: Logging improvement (UID, net-attach-def) / Backport to 4.4
2020-05-17 14:48:55 -07:00
OpenShift Merge Robot
d00e22aaca Merge pull request #61 from dougbtv/downstream-44-pick-ns-isolation-default
Bug 1827375: Allows pods in any namespace to refer to net-attach-defs in the default namespace
2020-05-13 23:16:08 -07:00
Tomofumi Hayashi
c6dca41433 Logging improvement (UID, net-attach-def)
This change adds the pod UID and net-attach-def name to verbose logs
and sends a Kubernetes event when the net-attach-def is not found.
2020-05-13 00:06:08 +09:00
dougbtv
3430f77b81 Allows namespaceIsolation to allow pods in any namespace refer to the default namespace 2020-04-23 14:54:48 -04:00
OpenShift Merge Robot
a30bc622b0 Merge pull request #56 from openshift-cherrypick-robot/cherry-pick-54-to-release-4.4
Bug 1823490: [release-4.4] Adds readinessindicatorfile check on CNI DEL
2020-04-16 06:51:21 -07:00
dougbtv
916690bae4 Adds readinessindicatorfile check on CNI DEL 2020-04-02 22:40:05 +00:00
OpenShift Merge Robot
384bc19c84 Merge pull request #53 from dougbtv/no-config-invalidation-44
Bug 1809285: Removes configuration invalidation [4.4 backport]
2020-03-03 16:08:05 +01:00
Tomofumi Hayashi
55ec6a5c1c Removes configuration invalidation 2020-03-02 14:16:57 -05:00
OpenShift Merge Robot
e52c28c945 Merge pull request #49 from dougbtv/readiness-indicator-poll-44
Bug 1805774: Exposes readinessindicatorfile and uses wait.PollImmediate [backport 4.4]
2020-02-23 12:49:26 -05:00
dougbtv
e8726d7120 Changes wait.ExponentialBackoff to wait.PollImmediate and exposes readinessindicatorfile via entrypoint parameter 2020-02-21 14:13:49 -05:00
OpenShift Merge Robot
a4bcbc043c Merge pull request #43 from dougbtv/master-fix-readiness
Bug 1794142: Fixes unnecessary wait when readinessindicatorfile is not present
2020-02-12 16:28:16 +01:00
Dan Williams
be46ee25b6 multus: print pod namespace/name in cmdAdd/cmdDel error messages
Signed-off-by: Dan Williams <dcbw@redhat.com>
2020-02-11 10:58:14 -05:00
dougbtv
4ca7e30bf5 [bugfix] Fixes unnecessary wait when readinessindicatorfile is not present 2020-02-11 10:51:17 -05:00
11 changed files with 155 additions and 109 deletions

View File

@@ -117,6 +117,8 @@ You may configure the logging level by using the `LogLevel` option in your CNI c
The functionality provided by the `namespaceIsolation` configuration option enables a mode where Multus only allows pods to access custom resources (the `NetworkAttachmentDefinitions`) within the namespace where that pod resides. In other words, the `NetworkAttachmentDefinitions` are isolated to usage within the namespace in which they're created.
**NOTE**: The default namespace is special in this scenario. Even with namespace isolation enabled, any pod in any namespace is allowed to refer to `NetworkAttachmentDefinitions` in the default namespace. This allows you to create commonly used, unprivileged `NetworkAttachmentDefinitions` without having to put them in all namespaces. For example, if you had a `NetworkAttachmentDefinition` named `foo` in the default namespace, you may reference it in an annotation with: `default/foo`.
For example, if a pod is created in the namespace called `development`, Multus will not allow networks to be attached when they are defined by custom resources created in a different namespace (with the exception of the `default` namespace, as noted above).
Consider the situation where you have a system with users of different privilege levels -- as an example, a platform which has two administrators: a Senior Administrator and a Junior Administrator. The Senior Administrator may have access to all namespaces, and some network configurations as used by Multus are considered to be privileged in that they allow access to some protected resources available on the network. However, the Junior Administrator has access to only a subset of namespaces, and it should therefore be assumed that the Junior Administrator cannot create pods which use these privileged network configurations outside of that limited subset of namespaces. The `namespaceIsolation` feature provides this isolation, allowing pods created in a given namespace to access only custom resources in the same namespace as the pod.
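For clarity, here is a minimal Go sketch of the check this implies (mirroring the change to `GetNetworkDelegates` in this pull request); the function name is illustrative rather than part of the Multus API:

```go
package main

// isolationAllowed reports whether a pod in podNamespace may reference a
// NetworkAttachmentDefinition in targetNamespace when namespaceIsolation
// is enabled. References to the "default" namespace are always permitted.
func isolationAllowed(namespaceIsolation bool, podNamespace, targetNamespace string) bool {
	if !namespaceIsolation {
		// Isolation disabled: any namespace may be referenced.
		return true
	}
	if podNamespace == targetNamespace {
		// Same-namespace references are always allowed.
		return true
	}
	// The exception added in this change: the default namespace is always allowed.
	return targetNamespace == "default"
}
```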

View File

@@ -619,3 +619,7 @@ When using `--multus-conf-file=auto` you may also care to specify a `binDir` in
Sometimes, you may not wish to have the entrypoint copy the Multus binary onto the host; for example, you may have another way to copy in a specific version of Multus. By default, the binary is always copied, but you may disable the copy with:
--skip-multus-binary-copy=true
If you wish to have the auto configuration set a `readinessindicatorfile` in the generated configuration, you can use the `--readiness-indicator-file` option to specify which file should be used as the readiness indicator.
--readiness-indicator-file=/path/to/file
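When this option is set, the generated configuration gains a `readinessindicatorfile` entry, and Multus waits for that file to exist before handling CNI ADD and DEL. A minimal sketch of that wait, assuming the one-second poll interval and 45-second timeout used in this change (the helper name is illustrative):

```go
package main

import (
	"os"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForReadinessIndicator polls once per second, for up to 45 seconds,
// until the readiness indicator file exists. When no file is configured,
// it returns immediately with no error.
func waitForReadinessIndicator(path string) error {
	if path == "" {
		return nil
	}
	return wait.PollImmediate(1*time.Second, 45*time.Second, func() (bool, error) {
		_, err := os.Stat(path)
		return err == nil, nil
	})
}
```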

View File

@@ -3,20 +3,6 @@
# Always exit on errors.
set -e
# Run a clean up when we exit if configured to do so.
trap cleanup TERM
function cleanup {
if [ "$MULTUS_CLEANUP_CONFIG_ON_EXIT" == "true" ]; then
CONF=$(cat <<-EOF
{Multus configuration intentionally invalidated to prevent pods from being scheduled.}
EOF
)
echo $CONF > $CNI_CONF_DIR/00-multus.conf
log "Multus configuration intentionally invalidated to prevent pods from being scheduled."
fi
}
# Set our known directories.
CNI_CONF_DIR="/host/etc/cni/net.d"
CNI_BIN_DIR="/host/opt/cni/bin"
@@ -28,6 +14,7 @@ MULTUS_KUBECONFIG_FILE_HOST="/etc/cni/net.d/multus.d/multus.kubeconfig"
MULTUS_NAMESPACE_ISOLATION=false
MULTUS_LOG_LEVEL=""
MULTUS_LOG_FILE=""
MULTUS_READINESS_INDICATOR_FILE=""
OVERRIDE_NETWORK_NAME=false
MULTUS_CLEANUP_CONFIG_ON_EXIT=false
RESTART_CRIO=false
@@ -61,6 +48,7 @@ function usage()
echo -e "\t--override-network-name=false (used only with --multus-conf-file=auto)"
echo -e "\t--cleanup-config-on-exit=false (used only with --multus-conf-file=auto)"
echo -e "\t--rename-conf-file=false (used only with --multus-conf-file=auto)"
echo -e "\t--readiness-indicator-file=$MULTUS_READINESS_INDICATOR_FILE (used only with --multus-conf-file=auto)"
echo -e "\t--additional-bin-dir=$ADDITIONAL_BIN_DIR (adds binDir option to configuration, used only with --multus-conf-file=auto)"
echo -e "\t--restart-crio=false (restarts CRIO after config file is generated)"
}
@@ -137,6 +125,9 @@ while [ "$1" != "" ]; do
--skip-multus-binary-copy)
SKIP_BINARY_COPY=$VALUE
;;
--readiness-indicator-file)
MULTUS_READINESS_INDICATOR_FILE=$VALUE
;;
*)
warn "unknown parameter \"$PARAM\""
;;
@@ -250,13 +241,6 @@ if [ "$MULTUS_CONF_FILE" == "auto" ]; then
log "Attemping to find master plugin configuration, attempt $tries"
fi
let "tries+=1"
# See if the Multus configuration file exists, if it does then clean it up.
if [ "$MULTUS_CLEANUP_CONFIG_ON_EXIT" == true ] && [ -f "$CNI_CONF_DIR/00-multus.conf" ]; then
# But first, check if it has the invalidated configuration in it (otherwise we keep doing this over and over.)
if ! grep -q "invalidated" $CNI_CONF_DIR/00-multus.conf; then
cleanup
fi
fi
sleep 1;
else
error "Multus could not be configured: no master plugin was found."
@@ -305,6 +289,12 @@ if [ "$MULTUS_CONF_FILE" == "auto" ]; then
ADDITIONAL_BIN_DIR_STRING="\"binDir\": \"$ADDITIONAL_BIN_DIR\","
fi
READINESS_INDICATOR_FILE_STRING=""
if [ ! -z "${MULTUS_READINESS_INDICATOR_FILE// }" ]; then
READINESS_INDICATOR_FILE_STRING="\"readinessindicatorfile\": \"$MULTUS_READINESS_INDICATOR_FILE\","
fi
if [ "$OVERRIDE_NETWORK_NAME" == "true" ]; then
MASTER_PLUGIN_NET_NAME="$(cat $MULTUS_AUTOCONF_DIR/$MASTER_PLUGIN | \
python -c 'import json,sys;print json.load(sys.stdin)["name"]')"
@@ -324,6 +314,7 @@ if [ "$MULTUS_CONF_FILE" == "auto" ]; then
$LOG_LEVEL_STRING
$LOG_FILE_STRING
$ADDITIONAL_BIN_DIR_STRING
$READINESS_INDICATOR_FILE_STRING
"kubeconfig": "$MULTUS_KUBECONFIG_FILE_HOST",
"delegates": [
$MASTER_PLUGIN_JSON
@@ -363,8 +354,17 @@ if [ "$MULTUS_CLEANUP_CONFIG_ON_EXIT" == true ]; then
while true; do
# Check and see if the original master plugin configuration exists...
if [ ! -f "$MASTER_PLUGIN_LOCATION" ]; then
log "Master plugin @ $MASTER_PLUGIN_LOCATION has been deleted. Performing cleanup..."
cleanup
log "Master plugin @ $MASTER_PLUGIN_LOCATION has been deleted. Allowing 45 seconds for its restoration..."
sleep 10
for i in {1..35}
do
if [ -f "$MASTER_PLUGIN_LOCATION" ]; then
log "Master plugin @ $MASTER_PLUGIN_LOCATION was restored. Regenerating given configuration."
break
fi
sleep 1
done
generateMultusConf
log "Continuing watch loop after configuration regeneration..."
fi

View File

@@ -454,22 +454,22 @@ func GetK8sArgs(args *skel.CmdArgs) (*types.K8sArgs, error) {
// TryLoadPodDelegates attempts to load Kubernetes-defined delegates and add them to the Multus config.
// Returns the number of Kubernetes-defined delegates added, the pod, the client info, and any error.
func TryLoadPodDelegates(k8sArgs *types.K8sArgs, conf *types.NetConf, kubeClient KubeClient) (int, *ClientInfo, error) {
func TryLoadPodDelegates(k8sArgs *types.K8sArgs, conf *types.NetConf, kubeClient KubeClient) (int, *v1.Pod, *ClientInfo, error) {
var err error
clientInfo := &ClientInfo{}
logging.Debugf("TryLoadPodDelegates: %v, %v, %v", k8sArgs, conf, kubeClient)
kubeClient, err = GetK8sClient(conf.Kubeconfig, kubeClient)
if err != nil {
return 0, nil, err
return 0, nil, nil, err
}
if kubeClient == nil {
if len(conf.Delegates) == 0 {
// No available kube client and no delegates, we can't do anything
return 0, nil, logging.Errorf("TryLoadPodDelegates: must have either Kubernetes config or delegates")
return 0, nil, nil, logging.Errorf("TryLoadPodDelegates: must have either Kubernetes config or delegates")
}
return 0, nil, nil
return 0, nil, nil, nil
}
setKubeClientInfo(clientInfo, kubeClient, k8sArgs)
@@ -477,12 +477,12 @@ func TryLoadPodDelegates(k8sArgs *types.K8sArgs, conf *types.NetConf, kubeClient
pod, err := kubeClient.GetPod(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
if err != nil {
logging.Debugf("TryLoadPodDelegates: Err in loading K8s cluster default network from pod annotation: %v, use cached delegates", err)
return 0, nil, nil
return 0, nil, nil, nil
}
delegate, err := tryLoadK8sPodDefaultNetwork(kubeClient, pod, conf)
if err != nil {
return 0, nil, logging.Errorf("TryLoadPodDelegates: error in loading K8s cluster default network from pod annotation: %v", err)
return 0, nil, nil, logging.Errorf("TryLoadPodDelegates: error in loading K8s cluster default network from pod annotation: %v", err)
}
if delegate != nil {
logging.Debugf("TryLoadPodDelegates: Overwrite the cluster default network with %v from pod annotations", delegate)
@@ -496,13 +496,13 @@ func TryLoadPodDelegates(k8sArgs *types.K8sArgs, conf *types.NetConf, kubeClient
if err != nil {
if _, ok := err.(*NoK8sNetworkError); ok {
return 0, clientInfo, nil
return 0, nil, clientInfo, nil
}
return 0, nil, logging.Errorf("TryLoadPodDelegates: error in getting k8s network from pod: %v", err)
return 0, nil, nil, logging.Errorf("TryLoadPodDelegates: error in getting k8s network from pod: %v", err)
}
if err = conf.AddDelegates(delegates); err != nil {
return 0, nil, err
return 0, nil, nil, err
}
// Check gatewayRequest is configured in delegates
@@ -519,10 +519,10 @@ func TryLoadPodDelegates(k8sArgs *types.K8sArgs, conf *types.NetConf, kubeClient
types.CheckGatewayConfig(conf.Delegates)
}
return len(delegates), clientInfo, nil
return len(delegates), pod, clientInfo, nil
}
return 0, clientInfo, nil
return 0, pod, clientInfo, nil
}
// GetK8sClient gets client info from kubeconfig
@@ -603,7 +603,10 @@ func GetNetworkDelegates(k8sclient KubeClient, pod *v1.Pod, networks []*types.Ne
// In the case that this is a mismatch when namespaceisolation is enabled, this should be an error.
if confnamespaceIsolation {
if defaultNamespace != net.Namespace {
return nil, logging.Errorf("GetNetworkDelegates: namespace isolation enabled, annotation violates permission, pod is in namespace %v but refers to target namespace %v", defaultNamespace, net.Namespace)
// There is an exception however, we always allow a reference to the default namespace.
if net.Namespace != "default" {
return nil, logging.Errorf("GetNetworkDelegates: namespace isolation enabled, annotation violates permission, pod is in namespace %v but refers to target namespace %v", defaultNamespace, net.Namespace)
}
}
}

View File

@@ -580,7 +580,7 @@ var _ = Describe("k8sclient operations", func() {
Expect(netConf.Delegates[0].Conf.Name).To(Equal("net2"))
Expect(netConf.Delegates[0].Conf.Type).To(Equal("mynet2"))
numK8sDelegates, _, err := TryLoadPodDelegates(k8sArgs, netConf, kubeClient)
numK8sDelegates, _, _, err := TryLoadPodDelegates(k8sArgs, netConf, kubeClient)
Expect(err).NotTo(HaveOccurred())
Expect(numK8sDelegates).To(Equal(0))
Expect(netConf.Delegates[0].Conf.Name).To(Equal("net1"))
@@ -617,7 +617,7 @@ var _ = Describe("k8sclient operations", func() {
Expect(err).To(HaveOccurred())
netConf.ConfDir = "badfilepath"
_, _, err = TryLoadPodDelegates(k8sArgs, netConf, kubeClient)
_, _, _, err = TryLoadPodDelegates(k8sArgs, netConf, kubeClient)
Expect(err).To(HaveOccurred())
})
@@ -650,7 +650,7 @@ var _ = Describe("k8sclient operations", func() {
k8sArgs, err := GetK8sArgs(args)
Expect(err).NotTo(HaveOccurred())
numK8sDelegates, _, err := TryLoadPodDelegates(k8sArgs, netConf, kubeClient)
numK8sDelegates, _, _, err := TryLoadPodDelegates(k8sArgs, netConf, kubeClient)
Expect(err).NotTo(HaveOccurred())
Expect(numK8sDelegates).To(Equal(0))
Expect(netConf.Delegates[0].Conf.Name).To(Equal("net1"))
@@ -685,7 +685,7 @@ var _ = Describe("k8sclient operations", func() {
k8sArgs, err := GetK8sArgs(args)
Expect(err).NotTo(HaveOccurred())
_, _, err = TryLoadPodDelegates(k8sArgs, netConf, nil)
_, _, _, err = TryLoadPodDelegates(k8sArgs, netConf, nil)
Expect(err).To(HaveOccurred())
})
@@ -717,12 +717,12 @@ var _ = Describe("k8sclient operations", func() {
k8sArgs, err := GetK8sArgs(args)
Expect(err).NotTo(HaveOccurred())
_, _, err = TryLoadPodDelegates(k8sArgs, netConf, nil)
_, _, _, err = TryLoadPodDelegates(k8sArgs, netConf, nil)
Expect(err).NotTo(HaveOccurred())
// additionally, we expect the test to fail with no delegates, as at least one is always required.
netConf.Delegates = nil
_, _, err = TryLoadPodDelegates(k8sArgs, netConf, nil)
_, _, _, err = TryLoadPodDelegates(k8sArgs, netConf, nil)
Expect(err).To(HaveOccurred())
})
@@ -780,7 +780,7 @@ users:
k8sArgs, err := GetK8sArgs(args)
Expect(err).NotTo(HaveOccurred())
_, _, err = TryLoadPodDelegates(k8sArgs, netConf, nil)
_, _, _, err = TryLoadPodDelegates(k8sArgs, netConf, nil)
Expect(err).NotTo(HaveOccurred())
})

View File

@@ -41,6 +41,7 @@ import (
"github.com/intel/multus-cni/netutils"
"github.com/intel/multus-cni/types"
"github.com/vishvananda/netlink"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
)
@@ -48,12 +49,8 @@ var version = "master@git"
var commit = "unknown commit"
var date = "unknown date"
var defaultReadinessBackoff = wait.Backoff{
Steps: 4,
Duration: 250 * time.Millisecond,
Factor: 4.0,
Jitter: 0.1,
}
var pollDuration = 1000 * time.Millisecond
var pollTimeout = 45 * time.Second
func printVersionString() string {
return fmt.Sprintf("multus-cni version:%s, commit:%s, date:%s",
@@ -225,7 +222,7 @@ func conflistDel(rt *libcni.RuntimeConf, rawnetconflist []byte, binDir string, e
return err
}
func delegateAdd(exec invoke.Exec, ifName string, delegate *types.DelegateNetConf, rt *libcni.RuntimeConf, binDir string, cniArgs string) (cnitypes.Result, error) {
func delegateAdd(exec invoke.Exec, pod *v1.Pod, ifName string, delegate *types.DelegateNetConf, rt *libcni.RuntimeConf, binDir string, cniArgs string) (cnitypes.Result, error) {
logging.Debugf("delegateAdd: %v, %s, %v, %v, %s", exec, ifName, delegate, rt, binDir)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return nil, logging.Errorf("delegateAdd: error setting envionment variable CNI_IFNAME")
@@ -290,21 +287,25 @@ func delegateAdd(exec invoke.Exec, ifName string, delegate *types.DelegateNetCon
if logging.GetLoggingLevel() >= logging.VerboseLevel {
data, _ := json.Marshal(result)
var confName string
var cniConfName string
if delegate.ConfListPlugin {
confName = delegate.ConfList.Name
cniConfName = delegate.ConfList.Name
} else {
confName = delegate.Conf.Name
cniConfName = delegate.Conf.Name
}
logging.Verbosef("Add: %s:%s:%s:%s %s", rt.Args[1][1], rt.Args[2][1], confName, rt.IfName, string(data))
podUID := "unknownUID"
if pod != nil {
podUID = string(pod.ObjectMeta.UID)
}
logging.Verbosef("Add: %s:%s:%s:%s(%s):%s %s", rt.Args[1][1], rt.Args[2][1], podUID, delegate.Name, cniConfName, rt.IfName, string(data))
}
return result, nil
}
func delegateDel(exec invoke.Exec, ifName string, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, binDir string) error {
logging.Debugf("delegateDel: %v, %s, %v, %v, %s", exec, ifName, delegateConf, rt, binDir)
func delegateDel(exec invoke.Exec, pod *v1.Pod, ifName string, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, binDir string) error {
logging.Debugf("delegateDel: %v, %v, %s, %v, %v, %s", exec, pod, ifName, delegateConf, rt, binDir)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return logging.Errorf("delegateDel: error setting envionment variable CNI_IFNAME")
}
@@ -316,7 +317,11 @@ func delegateDel(exec invoke.Exec, ifName string, delegateConf *types.DelegateNe
} else {
confName = delegateConf.Conf.Name
}
logging.Verbosef("Del: %s:%s:%s:%s %s", rt.Args[1][1], rt.Args[2][1], confName, rt.IfName, string(delegateConf.Bytes))
podUID := "unknownUID"
if pod != nil {
podUID = string(pod.ObjectMeta.UID)
}
logging.Verbosef("Del: %s:%s:%s:%s:%s %s", rt.Args[1][1], rt.Args[2][1], podUID, confName, rt.IfName, string(delegateConf.Bytes))
}
var err error
@@ -335,8 +340,8 @@ func delegateDel(exec invoke.Exec, ifName string, delegateConf *types.DelegateNe
return err
}
func delPlugins(exec invoke.Exec, argIfname string, delegates []*types.DelegateNetConf, lastIdx int, rt *libcni.RuntimeConf, binDir string) error {
logging.Debugf("delPlugins: %v, %s, %v, %d, %v, %s", exec, argIfname, delegates, lastIdx, rt, binDir)
func delPlugins(exec invoke.Exec, pod *v1.Pod, argIfname string, delegates []*types.DelegateNetConf, lastIdx int, rt *libcni.RuntimeConf, binDir string) error {
logging.Debugf("delPlugins: %v, %v, %s, %v, %d, %v, %s", exec, pod, argIfname, delegates, lastIdx, rt, binDir)
if os.Setenv("CNI_COMMAND", "DEL") != nil {
return logging.Errorf("delPlugins: error setting envionment variable CNI_COMMAND to a value of DEL")
}
@@ -346,7 +351,7 @@ func delPlugins(exec invoke.Exec, argIfname string, delegates []*types.DelegateN
ifName := getIfname(delegates[idx], argIfname, idx)
rt.IfName = ifName
// Attempt to delete all but do not error out, instead, collect all errors.
if err := delegateDel(exec, ifName, delegates[idx], rt, binDir); err != nil {
if err := delegateDel(exec, pod, ifName, delegates[idx], rt, binDir); err != nil {
errorstrings = append(errorstrings, err.Error())
}
}
@@ -359,45 +364,53 @@ func delPlugins(exec invoke.Exec, argIfname string, delegates []*types.DelegateN
return nil
}
func cmdErr(k8sArgs *types.K8sArgs, format string, args ...interface{}) error {
prefix := "Multus: "
if k8sArgs != nil {
prefix += fmt.Sprintf("[%s/%s]: ", k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_NAME)
}
return logging.Errorf(prefix+format, args...)
}
func cmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) (cnitypes.Result, error) {
n, err := types.LoadNetConf(args.StdinData)
logging.Debugf("cmdAdd: %v, %v, %v", args, exec, kubeClient)
if err != nil {
return nil, logging.Errorf("Multus: error loading netconf: %v", err)
return nil, cmdErr(nil, "error loading netconf: %v", err)
}
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return nil, logging.Errorf("Multus: error getting k8s args: %v", err)
return nil, cmdErr(nil, "error getting k8s args: %v", err)
}
wait.ExponentialBackoff(defaultReadinessBackoff, func() (bool, error) {
_, err := os.Stat(n.ReadinessIndicatorFile)
switch {
case err == nil:
return true, nil
default:
return false, nil
if n.ReadinessIndicatorFile != "" {
err := wait.PollImmediate(pollDuration, pollTimeout, func() (bool, error) {
_, err := os.Stat(n.ReadinessIndicatorFile)
return err == nil, nil
})
if err != nil {
return nil, cmdErr(k8sArgs, "PollImmediate error waiting for ReadinessIndicatorFile: %v", err)
}
})
}
if n.ClusterNetwork != "" {
err = k8s.GetDefaultNetworks(k8sArgs, n, kubeClient)
if err != nil {
return nil, logging.Errorf("Multus: failed to get clusterNetwork/defaultNetworks: %v", err)
return nil, cmdErr(k8sArgs, "failed to get clusterNetwork/defaultNetworks: %v", err)
}
// First delegate is always the master plugin
n.Delegates[0].MasterPlugin = true
}
_, kc, err := k8s.TryLoadPodDelegates(k8sArgs, n, kubeClient)
_, pod, kc, err := k8s.TryLoadPodDelegates(k8sArgs, n, kubeClient)
if err != nil {
return nil, logging.Errorf("Multus: error loading k8s delegates k8s args: %v", err)
return nil, cmdErr(k8sArgs, "error loading k8s delegates k8s args: %v", err)
}
// cache the multus config
if err := saveDelegates(args.ContainerID, n.CNIDir, n.Delegates); err != nil {
return nil, logging.Errorf("Multus: error saving the delegates: %v", err)
return nil, cmdErr(k8sArgs, "error saving the delegates: %v", err)
}
var result, tmpResult cnitypes.Result
@@ -408,7 +421,7 @@ func cmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) (cn
runtimeConfig := types.MergeCNIRuntimeConfig(n.RuntimeConfig, delegate)
rt := types.CreateCNIRuntimeConf(args, k8sArgs, ifName, runtimeConfig)
tmpResult, err = delegateAdd(exec, ifName, delegate, rt, n.BinDir, cniArgs)
tmpResult, err = delegateAdd(exec, pod, ifName, delegate, rt, n.BinDir, cniArgs)
if err != nil {
// If the add failed, tear down all networks we already added
netName := delegate.Conf.Name
@@ -416,8 +429,8 @@ func cmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) (cn
netName = delegate.ConfList.Name
}
// Ignore errors; DEL must be idempotent anyway
_ = delPlugins(exec, args.IfName, n.Delegates, idx, rt, n.BinDir)
return nil, logging.Errorf("Multus: error adding pod to network %q: %v", netName, err)
_ = delPlugins(exec, nil, args.IfName, n.Delegates, idx, rt, n.BinDir)
return nil, cmdErr(k8sArgs, "error adding container to network %q: %v", netName, err)
}
// Remove gateway from routing table if the gateway is not used
@@ -438,7 +451,7 @@ func cmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) (cn
if deletegateway {
tmpResult, err = netutils.DeleteDefaultGW(args, ifName, &tmpResult)
if err != nil {
return nil, logging.Errorf("Multus: Err in deleting gateway: %v", err)
return nil, cmdErr(k8sArgs, "error deleting default gateway: %v", err)
}
}
@@ -446,7 +459,7 @@ func cmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) (cn
if adddefaultgateway {
tmpResult, err = netutils.SetDefaultGW(args, ifName, delegate.GatewayRequest, &tmpResult)
if err != nil {
return nil, logging.Errorf("Multus: Err in setting default gateway: %v", err)
return nil, cmdErr(k8sArgs, "error setting default gateway: %v", err)
}
}
@@ -460,7 +473,7 @@ func cmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) (cn
if !types.CheckSystemNamespaces(kc.Podnamespace, n.SystemNamespaces) {
delegateNetStatus, err := types.LoadNetworkStatus(tmpResult, delegate.Conf.Name, delegate.MasterPlugin)
if err != nil {
return nil, logging.Errorf("Multus: error setting network status: %v", err)
return nil, cmdErr(k8sArgs, "error setting network status: %v", err)
}
netStatus = append(netStatus, delegateNetStatus)
@@ -473,7 +486,7 @@ func cmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) (cn
if !types.CheckSystemNamespaces(kc.Podnamespace, n.SystemNamespaces) {
err = k8s.SetNetworkStatus(kubeClient, k8sArgs, netStatus, n)
if err != nil {
return nil, logging.Errorf("Multus: error setting the networks status: %v", err)
return nil, cmdErr(k8sArgs, "error setting the networks status: %v", err)
}
}
}
@@ -511,7 +524,7 @@ func cmdDel(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) err
netnsfound = false
logging.Debugf("cmdDel: WARNING netns may not exist, netns: %s, err: %s", args.Netns, err)
} else {
return logging.Errorf("Multus: failed to open netns %q: %v", netns, err)
return cmdErr(nil, "failed to open netns %q: %v", netns, err)
}
}
@@ -521,7 +534,17 @@ func cmdDel(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) err
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return logging.Errorf("Multus: error getting k8s args: %v", err)
return cmdErr(nil, "error getting k8s args: %v", err)
}
if in.ReadinessIndicatorFile != "" {
err := wait.PollImmediate(pollDuration, pollTimeout, func() (bool, error) {
_, err := os.Stat(in.ReadinessIndicatorFile)
return err == nil, nil
})
if err != nil {
return cmdErr(k8sArgs, "PollImmediate error waiting for ReadinessIndicatorFile (on del): %v", err)
}
}
// Read the cache to get delegates json for the pod
@@ -532,29 +555,29 @@ func cmdDel(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) err
if in.ClusterNetwork != "" {
err = k8s.GetDefaultNetworks(k8sArgs, in, kubeClient)
if err != nil {
return logging.Errorf("Multus: failed to get clusterNetwork/defaultNetworks: %v", err)
return cmdErr(k8sArgs, "failed to get clusterNetwork/defaultNetworks: %v", err)
}
// First delegate is always the master plugin
in.Delegates[0].MasterPlugin = true
}
// Get pod annotation and so on
_, _, err := k8s.TryLoadPodDelegates(k8sArgs, in, kubeClient)
_, _, _, err := k8s.TryLoadPodDelegates(k8sArgs, in, kubeClient)
if err != nil {
if len(in.Delegates) == 0 {
// No delegate available so send error
return logging.Errorf("Multus: failed to get delegates: %v", err)
return cmdErr(k8sArgs, "failed to get delegates: %v", err)
}
// Get clusterNetwork before, so continue to delete
logging.Errorf("Multus: failed to get delegates: %v, but continue to delete clusterNetwork", err)
}
} else {
return logging.Errorf("Multus: error reading the delegates: %v", err)
return cmdErr(k8sArgs, "error reading the delegates: %v", err)
}
} else {
defer os.Remove(path)
if err := json.Unmarshal(netconfBytes, &in.Delegates); err != nil {
return logging.Errorf("Multus: failed to load netconf: %v", err)
return cmdErr(k8sArgs, "failed to load netconf: %v", err)
}
// check plugins field and enable ConfListPlugin if there is
for _, v := range in.Delegates {
@@ -589,8 +612,16 @@ func cmdDel(args *skel.CmdArgs, exec invoke.Exec, kubeClient k8s.KubeClient) err
}
}
kubeClient, err = k8s.GetK8sClient(in.Kubeconfig, kubeClient)
var pod *v1.Pod
if kubeClient != nil {
podName := string(k8sArgs.K8S_POD_NAME)
podNamespace := string(k8sArgs.K8S_POD_NAMESPACE)
pod, _ = kubeClient.GetPod(podNamespace, podName)
}
rt := types.CreateCNIRuntimeConf(args, k8sArgs, "", in.RuntimeConfig)
return delPlugins(exec, args.IfName, in.Delegates, len(in.Delegates)-1, rt, in.BinDir)
return delPlugins(exec, pod, args.IfName, in.Delegates, len(in.Delegates)-1, rt, in.BinDir)
}
func main() {

View File

@@ -1169,7 +1169,7 @@ var _ = Describe("multus operations", func() {
_, err = cmdAdd(args, fExec, nil)
Expect(fExec.addIndex).To(Equal(2))
Expect(fExec.delIndex).To(Equal(2))
Expect(err).To(MatchError("Multus: error adding pod to network \"other1\": delegateAdd: error invoking DelegateAdd - \"other-plugin\": error in getting result from AddNetwork: expected plugin failure"))
Expect(err).To(MatchError("Multus: [/]: error adding container to network \"other1\": delegateAdd: error invoking DelegateAdd - \"other-plugin\": error in getting result from AddNetwork: expected plugin failure"))
// Cleanup default network file.
if _, errStat := os.Stat(configPath); errStat == nil {
@@ -1805,7 +1805,7 @@ var _ = Describe("multus operations", func() {
err = cmdDel(args, fExec, fKubeClient)
Expect(err).NotTo(HaveOccurred())
Expect(fExec.delIndex).To(Equal(len(fExec.plugins)))
Expect(fKubeClient.PodCount).To(Equal(3))
Expect(fKubeClient.PodCount).To(Equal(4))
Expect(fKubeClient.NetCount).To(Equal(1))
})
@@ -1886,7 +1886,7 @@ var _ = Describe("multus operations", func() {
err = cmdDel(args, fExec, fKubeClient)
Expect(err).NotTo(HaveOccurred())
Expect(fExec.delIndex).To(Equal(len(fExec.plugins)))
Expect(fKubeClient.PodCount).To(Equal(4))
Expect(fKubeClient.PodCount).To(Equal(5))
Expect(fKubeClient.NetCount).To(Equal(2))
})

View File

@@ -149,6 +149,7 @@ func NewFakePod(name string, netAnnotation string, defaultNetAnnotation string)
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "test",
UID: "testUID",
},
Spec: v1.PodSpec{
Containers: []v1.Container{

View File

@@ -97,6 +97,9 @@ func LoadDelegateNetConf(bytes []byte, net *NetworkSelectionElement, deviceID st
}
if net != nil {
if net.Name != "" {
delegateConf.Name = net.Name
}
if net.InterfaceRequest != "" {
delegateConf.IfnameRequest = net.InterfaceRequest
}
@@ -160,6 +163,7 @@ func CreateCNIRuntimeConf(args *skel.CmdArgs, k8sArgs *K8sArgs, ifName string, r
ContainerID: args.ContainerID,
NetNS: args.Netns,
IfName: ifName,
// NOTE: Verbose logging depends on this order, so please keep Args order.
Args: [][2]string{
{"IgnoreUnknown", string("true")},
{"K8S_POD_NAMESPACE", string(k8sArgs.K8S_POD_NAMESPACE)},

View File

@@ -368,17 +368,17 @@ var _ = Describe("config operations", func() {
"args1": "val1"
}`
type bridgeNetConf struct {
Name string `json:"name"`
Type string `json:"type"`
Args struct {
Name string `json:"name"`
Type string `json:"type"`
Args struct {
CNI map[string]string `json:"cni"`
} `json:"args"`
}
err := json.Unmarshal([]byte(cniArgs), &args)
err := json.Unmarshal([]byte(cniArgs), &args)
Expect(err).NotTo(HaveOccurred())
net := &NetworkSelectionElement{
Name: "test-elem",
net := &NetworkSelectionElement{
Name: "test-elem",
CNIArgs: &args,
}
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), net, "")
@@ -404,17 +404,17 @@ var _ = Describe("config operations", func() {
"args1": "val1a"
}`
type bridgeNetConf struct {
Name string `json:"name"`
Type string `json:"type"`
Args struct {
Name string `json:"name"`
Type string `json:"type"`
Args struct {
CNI map[string]string `json:"cni"`
} `json:"args"`
}
err := json.Unmarshal([]byte(cniArgs), &args)
err := json.Unmarshal([]byte(cniArgs), &args)
Expect(err).NotTo(HaveOccurred())
net := &NetworkSelectionElement{
Name: "test-elem",
net := &NetworkSelectionElement{
Name: "test-elem",
CNIArgs: &args,
}
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), net, "")
@@ -439,20 +439,20 @@ var _ = Describe("config operations", func() {
"args1": "val1"
}`
type bridgeNetConf struct {
Type string `json:"type"`
Args struct {
Type string `json:"type"`
Args struct {
CNI map[string]string `json:"cni"`
} `json:"args"`
}
type bridgeNetConfList struct {
Name string `json:"name"`
Name string `json:"name"`
Plugins []*bridgeNetConf `json:"plugins"`
}
err := json.Unmarshal([]byte(cniArgs), &args)
err := json.Unmarshal([]byte(cniArgs), &args)
Expect(err).NotTo(HaveOccurred())
net := &NetworkSelectionElement{
Name: "test-elem",
net := &NetworkSelectionElement{
Name: "test-elem",
CNIArgs: &args,
}
delegateNetConf, err := LoadDelegateNetConf([]byte(conf), net, "")

View File

@@ -94,6 +94,7 @@ type NetworkStatus struct {
type DelegateNetConf struct {
Conf types.NetConf
ConfList types.NetConfList
Name string
IfnameRequest string `json:"ifnameRequest,omitempty"`
MacRequest string `json:"macRequest,omitempty"`
IPRequest []string `json:"ipRequest,omitempty"`