Merge pull request #1490 from tsorya/jkary-fix-status-gc-no-pod-context

Fix thick plugin STATUS and GC handling for plugin-level commands
This commit is contained in:
Ben Pickard
2026-03-23 15:39:43 -04:00
committed by GitHub
3 changed files with 90 additions and 19 deletions

View File

@@ -378,6 +378,7 @@ func conflistStatus(rt *libcni.RuntimeConf, rawnetconflist []byte, multusNetconf
}
if gt, _ := cniversion.GreaterThanOrEqualTo(confList.CNIVersion, "1.1.0"); !gt {
logging.Debugf("conflistStatus: skipping STATUS for network list %q cniVersion %q (< 1.1.0)", confList.Name, confList.CNIVersion)
return nil
}
err = cniNet.GetStatusNetworkList(context.Background(), confList)

View File

@@ -84,6 +84,7 @@ func printCmdArgs(args *skel.CmdArgs) string {
// HandleCNIRequest is the CNI server handler function; it is invoked whenever
// a CNI request is processed.
// Note: k8sArgs may be nil for plugin-level commands (STATUS, GC) that have no pod context.
func (s *Server) HandleCNIRequest(cmd string, k8sArgs *types.K8sArgs, cniCmdArgs *skel.CmdArgs) ([]byte, error) {
var result []byte
var err error
@@ -434,9 +435,14 @@ func (s *Server) handleCNIRequest(r *http.Request) ([]byte, error) {
return nil, fmt.Errorf("could not extract the CNI command args: %w", err)
}
k8sArgs, err := kubernetesRuntimeArgs(cr.Env, s.kubeclient)
if err != nil {
return nil, fmt.Errorf("could not extract the kubernetes runtime args: %w", err)
// STATUS and GC are plugin-level commands with no pod context,
// so they don't have K8S_POD_NAME/K8S_POD_NAMESPACE in CNI_ARGS.
var k8sArgs *types.K8sArgs
if cmdType != "STATUS" && cmdType != "GC" {
k8sArgs, err = kubernetesRuntimeArgs(cr.Env, s.kubeclient)
if err != nil {
return nil, fmt.Errorf("could not extract the kubernetes runtime args: %w", err)
}
}
result, err := s.HandleCNIRequest(cmdType, k8sArgs, cniCmdArgs)
@@ -514,6 +520,18 @@ func (s *Server) extractCniData(cniRequest *api.Request, overrideConf []byte) (s
}
cniCmdArgs := &skel.CmdArgs{}
// STATUS and GC are plugin-level commands with no pod context;
// they don't require CNI_CONTAINERID, CNI_NETNS, or CNI_ARGS.
if cmd == "STATUS" || cmd == "GC" {
var err error
cniCmdArgs.StdinData, err = overrideCNIConfigWithServerConfig(cniRequest.Config, overrideConf, s.ignoreReadinessIndicator)
if err != nil {
return "", nil, err
}
return cmd, cniCmdArgs, nil
}
cniCmdArgs.ContainerID, ok = cniRequest.Env["CNI_CONTAINERID"]
if !ok {
return "", nil, fmt.Errorf("missing CNI_CONTAINERID")
@@ -647,25 +665,13 @@ func (s *Server) cmdCheck(cmdArgs *skel.CmdArgs, k8sArgs *types.K8sArgs) error {
return multus.CmdCheck(cmdArgs, s.exec, s.kubeclient)
}
func (s *Server) cmdGC(cmdArgs *skel.CmdArgs, k8sArgs *types.K8sArgs) error {
namespace := string(k8sArgs.K8S_POD_NAMESPACE)
podName := string(k8sArgs.K8S_POD_NAME)
if namespace == "" || podName == "" {
return fmt.Errorf("required CNI variable missing. pod name: %s; pod namespace: %s", podName, namespace)
}
logging.Debugf("CmdGC for [%s/%s]. CNI conf: %+v", namespace, podName, *cmdArgs)
// cmdGC handles the CNI GC verb. GC is a plugin-level command with no
// pod context, so the K8s runtime args are intentionally ignored (the
// caller may pass nil) and no K8S_POD_NAME/K8S_POD_NAMESPACE is required.
func (s *Server) cmdGC(cmdArgs *skel.CmdArgs, _ *types.K8sArgs) error {
// Log the full CNI conf for debugging; there is no pod to key the log on.
logging.Debugf("CmdGC. CNI conf: %+v", *cmdArgs)
return multus.CmdGC(cmdArgs, s.exec, s.kubeclient)
}
func (s *Server) cmdStatus(cmdArgs *skel.CmdArgs, k8sArgs *types.K8sArgs) error {
namespace := string(k8sArgs.K8S_POD_NAMESPACE)
podName := string(k8sArgs.K8S_POD_NAME)
if namespace == "" || podName == "" {
return fmt.Errorf("required CNI variable missing. pod name: %s; pod namespace: %s", podName, namespace)
}
logging.Debugf("CmdStatus for [%s/%s]. CNI conf: %+v", namespace, podName, *cmdArgs)
// cmdStatus handles the CNI STATUS verb. STATUS is a plugin-level command
// with no pod context, so the K8s runtime args are intentionally ignored
// (the caller may pass nil); no pod name/namespace validation is performed.
func (s *Server) cmdStatus(cmdArgs *skel.CmdArgs, _ *types.K8sArgs) error {
// Log the full CNI conf for debugging; there is no pod to key the log on.
logging.Debugf("CmdStatus. CNI conf: %+v", *cmdArgs)
return multus.CmdStatus(cmdArgs, s.exec, s.kubeclient)
}

View File

@@ -91,6 +91,70 @@ var _ = Describe(suiteName, func() {
})
})
// Verifies that the thick-plugin server accepts STATUS and GC requests
// that carry no pod context — i.e. CNI_CONTAINERID, CNI_NETNS and
// CNI_ARGS are unset or empty, as kubelet sends them for plugin-level
// commands.
Context("STATUS and GC commands without pod context", func() {
const configPath = "/tmp/foo.multus.conf"
var (
cniServer *Server
K8sClient *k8s.ClientInfo
ctx context.Context
cancel context.CancelFunc
)
BeforeEach(func() {
var err error
K8sClient = fakeK8sClient()
// Touch the default network file.
// NOTE(review): the returned *os.File is never closed here — looks
// like a small fd leak per spec; confirm and consider closing it.
_, err = os.OpenFile(configPath, os.O_RDONLY|os.O_CREATE, 0755)
Expect(err).NotTo(HaveOccurred())
Expect(FilesystemPreRequirements(thickPluginRunDir)).To(Succeed())
ctx, cancel = context.WithCancel(context.TODO())
cniServer, err = startCNIServer(ctx, thickPluginRunDir, K8sClient, nil)
Expect(err).NotTo(HaveOccurred())
// Only set CNI_COMMAND — no CNI_CONTAINERID, CNI_NETNS, or CNI_ARGS
// to simulate how kubelet invokes STATUS/GC (plugin-level, no pod context).
os.Unsetenv("CNI_CONTAINERID")
os.Unsetenv("CNI_NETNS")
os.Unsetenv("CNI_ARGS")
})
AfterEach(func() {
// Stop the server context first, then tidy the config file,
// metrics registration, server socket, and env vars.
cancel()
if _, errStat := os.Stat(configPath); errStat == nil {
Expect(os.Remove(configPath)).To(Succeed())
}
unregisterMetrics(cniServer)
Expect(cniServer.Close()).To(Succeed())
os.Unsetenv("CNI_COMMAND")
os.Unsetenv("CNI_ARGS")
})
// Empty strings for container ID, netns and ifname mirror the absent
// pod context in each of the four cases below.
It("STATUS succeeds with CNI_ARGS unset", func() {
Expect(os.Setenv("CNI_COMMAND", "STATUS")).NotTo(HaveOccurred())
Expect(api.CmdStatus(cniCmdArgs("", "", "", referenceConfig(thickPluginRunDir)))).To(Succeed())
})
It("GC succeeds with CNI_ARGS unset", func() {
Expect(os.Setenv("CNI_COMMAND", "GC")).NotTo(HaveOccurred())
Expect(api.CmdGC(cniCmdArgs("", "", "", referenceConfig(thickPluginRunDir)))).To(Succeed())
})
It("STATUS succeeds with CNI_ARGS empty", func() {
Expect(os.Setenv("CNI_COMMAND", "STATUS")).NotTo(HaveOccurred())
Expect(os.Setenv("CNI_ARGS", "")).NotTo(HaveOccurred())
Expect(api.CmdStatus(cniCmdArgs("", "", "", referenceConfig(thickPluginRunDir)))).To(Succeed())
})
It("GC succeeds with CNI_ARGS empty", func() {
Expect(os.Setenv("CNI_COMMAND", "GC")).NotTo(HaveOccurred())
Expect(os.Setenv("CNI_ARGS", "")).NotTo(HaveOccurred())
Expect(api.CmdGC(cniCmdArgs("", "", "", referenceConfig(thickPluginRunDir)))).To(Succeed())
})
})
Context("CNI operations started from the shim", func() {
const (
containerID = "123456789"