Mirror of https://github.com/k8snetworkplumbingwg/multus-cni.git
[increment][refactor] Refactor to use pod name/namespace instead of claim UID. I think default networks are still missing for DRA items.
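For orientation (not part of the diff itself): the refactor keys the DRA-written delegate file on the pod's namespace and name rather than the ResourceClaim UID, so the CNI ADD handler can locate it using only the pod identity it already receives via CNI_ARGS. A minimal sketch of that naming convention, using a made-up pod name, assuming the path shown in the hunks below:

package main

import (
    "fmt"
    "path/filepath"
)

// delegatePath mirrors the convention used on both sides of this commit:
// applyConfig writes /run/k8s.cni.cncf.io/dra/<namespace>_<podname>.json and
// pkg/drahelpers reads the same path at CNI ADD time.
func delegatePath(podNamespace, podName string) string {
    return filepath.Join("/run/k8s.cni.cncf.io/dra", fmt.Sprintf("%s_%s.json", podNamespace, podName))
}

func main() {
    // "samplepod" is a hypothetical pod name for illustration only.
    fmt.Println(delegatePath("default", "samplepod"))
    // Output: /run/k8s.cni.cncf.io/dra/default_samplepod.json
}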
@@ -6,6 +6,7 @@ metadata:
   namespace: default
 spec:
   config: '{
       "name": "bridge-net",
       "cniVersion": "0.4.0",
       "type": "bridge",
       "bridge": "cni0",
@@ -18,6 +18,7 @@ import (
    configapi "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/dra/api/multus-cni.io/resource/net/v1alpha1"
    multusk8sutils "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/k8sclient"
    "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types"
+   multustypes "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types"

    cdiapi "tags.cncf.io/container-device-interface/pkg/cdi"
    cdispec "tags.cncf.io/container-device-interface/specs-go"
@@ -202,7 +203,21 @@ func (s *DeviceState) prepareDevices(claim *resourceapi.ResourceClaim) (Prepared
        results = append(results, &claim.Status.Allocation.Devices.Results[i])
    }

-   perDeviceCDIContainerEdits, err := s.applyConfig(netConfig, results, claim.Namespace, string(claim.UID))
+   var podName, podNamespace string
+
+   for _, owner := range claim.OwnerReferences {
+       if owner.Kind == "Pod" && owner.Name != "" {
+           podName = owner.Name
+           podNamespace = claim.Namespace
+           break
+       }
+   }
+
+   if podName == "" {
+       return nil, fmt.Errorf("could not determine owning pod from claim metadata")
+   }
+
+   perDeviceCDIContainerEdits, err := s.applyConfig(netConfig, results, podName, podNamespace, string(claim.UID))

    if err != nil {
        return nil, fmt.Errorf("failed to apply CDI container edits: %w", err)
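For readers skimming the hunk above: the owning pod is derived purely from the claim's ownerReferences, with the claim's own namespace standing in for the pod namespace. A standalone sketch of that lookup, assuming only apimachinery types (the real code walks a resourceapi.ResourceClaim):

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podFromOwnerRefs returns the first Pod owner listed in the owner references.
// ResourceClaims are namespaced alongside the pods that own them, so the
// claim's namespace is reused as the pod namespace.
func podFromOwnerRefs(owners []metav1.OwnerReference, claimNamespace string) (string, string, error) {
    for _, owner := range owners {
        if owner.Kind == "Pod" && owner.Name != "" {
            return owner.Name, claimNamespace, nil
        }
    }
    return "", "", fmt.Errorf("could not determine owning pod from claim metadata")
}

func main() {
    // Hypothetical owner reference for illustration.
    owners := []metav1.OwnerReference{{Kind: "Pod", Name: "samplepod"}}
    name, ns, err := podFromOwnerRefs(owners, "default")
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s/%s\n", ns, name) // default/samplepod
}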
@@ -228,6 +243,7 @@ func (s *DeviceState) prepareDevices(claim *resourceapi.ResourceClaim) (Prepared
 func (s *DeviceState) applyConfig(
    config *configapi.NetConfig,
    results []*resourceapi.DeviceRequestAllocationResult,
+   podName string,
    podNamespace string,
    claimUID string,
 ) (PerDeviceCDIContainerEdits, error) {
@@ -248,11 +264,10 @@ func (s *DeviceState) applyConfig(

        klog.Infof("!bang: Whole net-attach-def: %+v", nad)

-       delegate := &types.DelegateNetConf{
-           Name: net.Name,
-       }
-       if err := json.Unmarshal([]byte(nad.Spec.Config), &delegate.Conf); err != nil {
-           return nil, fmt.Errorf("failed to unmarshal NAD config into Conf: %w", err)
+       delegate, err := multustypes.LoadDelegateNetConf([]byte(nad.Spec.Config), net, "", "")
+       if err != nil {
+           // Handle error loading delegate
+           return nil, fmt.Errorf("failed to load delegate netconf from NAD %s/%s: %w", net.Namespace, net.Name, err)
        }

        // Preserve ifname from network selection
@@ -264,7 +279,7 @@ func (s *DeviceState) applyConfig(
    }

    // Save delegates to a file
-   delegatesPath := filepath.Join("/run/k8s.cni.cncf.io/dra", claimUID+".json")
+   delegatesPath := filepath.Join("/run/k8s.cni.cncf.io/dra", fmt.Sprintf("%s_%s.json", podNamespace, podName))
    if err := os.MkdirAll(filepath.Dir(delegatesPath), 0755); err != nil {
        return nil, fmt.Errorf("failed to ensure delegate output dir: %w", err)
    }
@@ -281,6 +296,8 @@ func (s *DeviceState) applyConfig(
        envs := []string{
            fmt.Sprintf("MULTUS_DRA_DEVICE_NAME=%s", result.Device),
            fmt.Sprintf("MULTUS_DRA_NETWORKS=%s", config.Networks),
+           fmt.Sprintf("MULTUS_DRA_POD_NAMESPACE=%s", podNamespace),
+           fmt.Sprintf("MULTUS_DRA_POD_NAME=%s", podName),
            fmt.Sprintf("MULTUS_DRA_CLAIM_UID=%s", claimUID),
        }

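The MULTUS_DRA_* variables above are injected into the workload container through the CDI container edits; nothing in this diff consumes them yet (the !bang notes in CmdAdd hint at that). Purely as an illustration of what a process inside the container would see, assuming CDI env injection behaves as usual:

package main

import (
    "fmt"
    "os"
)

func main() {
    // Variable names taken from the envs slice in the hunk above.
    for _, key := range []string{
        "MULTUS_DRA_DEVICE_NAME",
        "MULTUS_DRA_NETWORKS",
        "MULTUS_DRA_POD_NAMESPACE",
        "MULTUS_DRA_POD_NAME",
        "MULTUS_DRA_CLAIM_UID",
    } {
        fmt.Printf("%s=%q\n", key, os.Getenv(key))
    }
}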
pkg/drahelpers/drahelpers.go (new file, 27 lines)
@@ -0,0 +1,27 @@
package drahelpers

import (
    "encoding/json"
    "fmt"
    "os"

    "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types"
)

const DRADelegateDir = "/run/k8s.cni.cncf.io/dra"

func LoadDelegatesFromDRAFile(podName string, podNamespace string) ([]*types.DelegateNetConf, error) {

    delegatePath := fmt.Sprintf("%s/%s_%s.json", DRADelegateDir, podNamespace, podName)
    data, err := os.ReadFile(delegatePath)
    if err != nil {
        return nil, fmt.Errorf("failed to read delegate file %s: %w", delegatePath, err)
    }

    var delegates []*types.DelegateNetConf
    if err := json.Unmarshal(data, &delegates); err != nil {
        return nil, fmt.Errorf("failed to unmarshal delegate JSON: %w", err)
    }

    return delegates, nil
}
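A possible usage sketch for the new helper; the pod identity here is hypothetical, and in CmdAdd it comes from the K8S_POD_NAME / K8S_POD_NAMESPACE CNI args:

package main

import (
    "fmt"

    "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/drahelpers"
)

func main() {
    delegates, err := drahelpers.LoadDelegatesFromDRAFile("samplepod", "default")
    if err != nil {
        // No DRA-written file for this pod; callers fall back to the API server.
        fmt.Println("no DRA delegates:", err)
        return
    }
    for _, d := range delegates {
        fmt.Println("delegate:", d.Name)
    }
}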
@@ -39,6 +39,7 @@ import (
    k8snet "k8s.io/apimachinery/pkg/util/net"
    "k8s.io/apimachinery/pkg/util/wait"

+   "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/drahelpers"
    k8s "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/k8sclient"
    "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging"
    "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/netutils"
@@ -647,7 +648,26 @@ func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (c
        }
    }

-   pod, err := GetPod(kubeClient, k8sArgs, false)
+   // !bang. This is where we're going to enter the netns and get the env variables.
+
+   var pod *v1.Pod
+
+   // resourceMap holds Pod device allocation information; only initialized if CRD contains 'resourceName' annotation.
+   // This will only be initialized once and all delegate objects can reference this to look up device info.
+   var resourceMap map[string]*types.ResourceInfo
+
+   // !bang here we're probably going to have to get the delegates another way if we have the env vars.
+   // then we conditionally do this.
+
+   // Load from env-based file
+   var kc *k8s.ClientInfo
+   useddra := false
+   delegates, err := drahelpers.LoadDelegatesFromDRAFile(string(k8sArgs.K8S_POD_NAME), string(k8sArgs.K8S_POD_NAMESPACE))
+   if err != nil {
+       // Fall back to usual kube-based logic
+       logging.Errorf("CmdAdd: No DRA delegates from env in netns: %v", err)
+
+       pod, err = GetPod(kubeClient, k8sArgs, false)
    if err != nil {
        if err == errPodNotFound {
            logging.Verbosef("CmdAdd: Warning: pod [%s/%s] not found, exiting with empty CNI result", k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_NAME)
@@ -658,10 +678,6 @@ func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (c
            return nil, err
        }

-   // resourceMap holds Pod device allocation information; only initizized if CRD contains 'resourceName' annotation.
-   // This will only be initialized once and all delegate objects can reference this to look up device info.
-   var resourceMap map[string]*types.ResourceInfo
-
    if n.ClusterNetwork != "" {
        resourceMap, err = k8s.GetDefaultNetworks(pod, n, kubeClient, resourceMap)
        if err != nil {
@@ -671,11 +687,23 @@ func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (c
            n.Delegates[0].MasterPlugin = true
        }

-       _, kc, err := k8s.TryLoadPodDelegates(pod, n, kubeClient, resourceMap)
+       _, kc, err = k8s.TryLoadPodDelegates(pod, n, kubeClient, resourceMap)
        if err != nil {
            return nil, cmdErr(k8sArgs, "error loading k8s delegates k8s args: %v", err)
        }
+
+   } else if len(delegates) > 0 {
+       // Successfully loaded delegates from env, use them
+       logging.Verbosef("CmdAdd: Successfully loaded %d DRA delegates from env in netns for pod [%s/%s]", len(delegates), k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_NAME)
+       n.Delegates = delegates
+       useddra = true
+   } else {
+       // I think this is just like there's no additional delegates?
+   }
+
+   // if we have the env var -- then here we'd load the delegates from file.
+   // then let's put them in n.Delegates so we can use the following logic.

    // cache the multus config
    if err := saveDelegates(args.ContainerID, n.CNIDir, n.Delegates); err != nil {
        return nil, cmdErr(k8sArgs, "error saving the delegates: %v", err)
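To summarize the branching the hunks above add to CmdAdd, here is a condensed, hypothetical helper (resolveDelegates and the package name are not part of this commit) that captures the intended order: prefer the file written by the DRA driver, otherwise fall back to the existing API-server path, and record whether DRA was used so later steps, such as network-status creation, can be skipped.

package drasketch

import (
    "fmt"

    "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/drahelpers"
    "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types"
)

// resolveDelegates tries the DRA-written file first and falls back to the
// caller-supplied kube-based loader otherwise. The returned boolean mirrors
// the useddra flag introduced in CmdAdd.
func resolveDelegates(podName, podNamespace string, kubeFallback func() ([]*types.DelegateNetConf, error)) ([]*types.DelegateNetConf, bool, error) {
    if delegates, err := drahelpers.LoadDelegatesFromDRAFile(podName, podNamespace); err == nil && len(delegates) > 0 {
        return delegates, true, nil
    }
    delegates, err := kubeFallback()
    if err != nil {
        return nil, false, fmt.Errorf("error loading k8s delegates: %w", err)
    }
    return delegates, false, nil
}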
@@ -792,7 +820,8 @@ func CmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (c
    }

    // Create the network statuses, only in case Multus has kubeconfig
-   if kubeClient != nil && kc != nil {
+   // !bang TODO: importantly, we're skipping this when using DRA for now, because we just want to "not use k8s at CNI time" with this pattern.
+   if kubeClient != nil && kc != nil && !useddra {
        if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAME), n.SystemNamespaces) {
            delegateNetStatuses, err := nadutils.CreateNetworkStatuses(tmpResult, delegate.Name, delegate.MasterPlugin, devinfo)
            if err != nil {