dockershim: use network PluginManager to synchronize pod network operations
commit aafd5c9ef6
parent 60525801c1
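This change swaps dockershim's raw network.NetworkPlugin field for the kubelet's network.PluginManager, so pod-level network operations (SetUpPod, TearDownPod, GetPodNetworkStatus) are synchronized per pod instead of hitting the plugin directly. The sketch below only illustrates that locking idea under assumed names (netPlugin, noopPlugin, pluginManager, and newPluginManager are invented here and are not the real pkg/kubelet/network types): a wrapper keeps one mutex per namespace/name and holds it for the duration of each plugin call.

// sketch_plugin_manager.go: a minimal, self-contained sketch of "synchronize
// pod network operations". Not the real pkg/kubelet/network implementation.
package main

import (
	"fmt"
	"sync"
)

// netPlugin is a pared-down stand-in for the kubelet NetworkPlugin interface.
type netPlugin interface {
	Name() string
	SetUpPod(namespace, name, containerID string) error
	TearDownPod(namespace, name, containerID string) error
}

// noopPlugin mimics a no-op network plugin for demonstration purposes.
type noopPlugin struct{}

func (noopPlugin) Name() string                                           { return "no-op" }
func (noopPlugin) SetUpPod(namespace, name, containerID string) error     { return nil }
func (noopPlugin) TearDownPod(namespace, name, containerID string) error  { return nil }

// pluginManager wraps a plugin and serializes operations per pod.
type pluginManager struct {
	plugin netPlugin

	mu       sync.Mutex             // guards podLocks
	podLocks map[string]*sync.Mutex // one lock per namespace/name
}

func newPluginManager(p netPlugin) *pluginManager {
	return &pluginManager{plugin: p, podLocks: map[string]*sync.Mutex{}}
}

// podLock returns the mutex dedicated to a single pod, creating it lazily.
func (pm *pluginManager) podLock(namespace, name string) *sync.Mutex {
	pm.mu.Lock()
	defer pm.mu.Unlock()
	key := namespace + "/" + name
	if pm.podLocks[key] == nil {
		pm.podLocks[key] = &sync.Mutex{}
	}
	return pm.podLocks[key]
}

// SetUpPod and TearDownPod hold the pod's lock for the duration of the
// underlying plugin call, so concurrent callers targeting the same pod are
// serialized while different pods proceed in parallel.
func (pm *pluginManager) SetUpPod(namespace, name, containerID string) error {
	l := pm.podLock(namespace, name)
	l.Lock()
	defer l.Unlock()
	return pm.plugin.SetUpPod(namespace, name, containerID)
}

func (pm *pluginManager) TearDownPod(namespace, name, containerID string) error {
	l := pm.podLock(namespace, name)
	l.Lock()
	defer l.Unlock()
	return pm.plugin.TearDownPod(namespace, name, containerID)
}

func (pm *pluginManager) PluginName() string { return pm.plugin.Name() }

func main() {
	pm := newPluginManager(noopPlugin{})
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Racing setup/teardown for the same pod is now serialized.
			_ = pm.SetUpPod("default", "nginx", "sandbox-1")
			_ = pm.TearDownPod("default", "nginx", "sandbox-1")
		}()
	}
	wg.Wait()
	fmt.Println("managed by", pm.PluginName())
}

On the dockershim side the diff below is largely mechanical: the dockerService field becomes network *network.PluginManager, every ds.networkPlugin call site becomes ds.network, and some caller-side error wrapping is dropped in favor of returning the manager's error directly.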
@@ -103,7 +103,7 @@ func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (str
 	// on the host as well, to satisfy parts of the pod spec that aren't
 	// recognized by the CNI standard yet.
 	cID := kubecontainer.BuildContainerID(runtimeName, createResp.ID)
-	err = ds.networkPlugin.SetUpPod(config.GetMetadata().Namespace, config.GetMetadata().Name, cID)
+	err = ds.network.SetUpPod(config.GetMetadata().Namespace, config.GetMetadata().Name, cID)
 	// TODO: Do we need to teardown on failure or can we rely on a StopPodSandbox call with the given ID?
 	return createResp.ID, err
 }
@@ -162,8 +162,8 @@ func (ds *dockerService) StopPodSandbox(podSandboxID string) error {
 	errList := []error{}
 	if needNetworkTearDown {
 		cID := kubecontainer.BuildContainerID(runtimeName, podSandboxID)
-		if err := ds.networkPlugin.TearDownPod(namespace, name, cID); err != nil {
-			errList = append(errList, fmt.Errorf("failed to teardown sandbox %q for pod %s/%s: %v", podSandboxID, namespace, name, err))
+		if err := ds.network.TearDownPod(namespace, name, cID); err != nil {
+			errList = append(errList, err)
 		}
 	}
 	if err := ds.client.StopContainer(podSandboxID, defaultSandboxGracePeriod); err != nil {
@@ -199,12 +199,12 @@ func (ds *dockerService) getIPFromPlugin(sandbox *dockertypes.ContainerJSON) (st
 	}
 	msg := fmt.Sprintf("Couldn't find network status for %s/%s through plugin", metadata.Namespace, metadata.Name)
 	cID := kubecontainer.BuildContainerID(runtimeName, sandbox.ID)
-	networkStatus, err := ds.networkPlugin.GetPodNetworkStatus(metadata.Namespace, metadata.Name, cID)
+	networkStatus, err := ds.network.GetPodNetworkStatus(metadata.Namespace, metadata.Name, cID)
 	if err != nil {
 		// This might be a sandbox that somehow ended up without a default
 		// interface (eth0). We can't distinguish this from a more serious
 		// error, so callers should probably treat it as non-fatal.
-		return "", fmt.Errorf("%v: %v", msg, err)
+		return "", err
 	}
 	if networkStatus == nil {
 		return "", fmt.Errorf("%v: invalid network status for", msg)
@@ -408,7 +408,7 @@ func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig
 	}
 	hc.CgroupParent = cgroupParent
 	// Apply security context.
-	applySandboxSecurityContext(lc, createConfig.Config, hc, ds.networkPlugin, separator)
+	applySandboxSecurityContext(lc, createConfig.Config, hc, ds.network, separator)
 
 	return nil
 }
@@ -26,6 +26,7 @@ import (
 
 	runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/kubelet/network"
 	"k8s.io/kubernetes/pkg/kubelet/types"
 )
 
@@ -146,7 +147,7 @@ func TestSandboxStatus(t *testing.T) {
 func TestNetworkPluginInvocation(t *testing.T) {
 	ds, _, _ := newTestDockerService()
 	mockPlugin := newTestNetworkPlugin(t)
-	ds.networkPlugin = mockPlugin
+	ds.network = network.NewPluginManager(mockPlugin)
 	defer mockPlugin.Finish()
 
 	name := "foo0"
@@ -158,6 +159,7 @@ func TestNetworkPluginInvocation(t *testing.T) {
 	)
 	cID := kubecontainer.ContainerID{Type: runtimeName, ID: fmt.Sprintf("/%v", makeSandboxName(c))}
 
+	mockPlugin.EXPECT().Name().Return("mockNetworkPlugin").AnyTimes()
 	setup := mockPlugin.EXPECT().SetUpPod(ns, name, cID)
 	// StopPodSandbox performs a lookup on status to figure out if the sandbox
 	// is running with hostnetworking, as all its given is the ID.
@@ -175,7 +177,7 @@ func TestNetworkPluginInvocation(t *testing.T) {
 func TestHostNetworkPluginInvocation(t *testing.T) {
 	ds, _, _ := newTestDockerService()
 	mockPlugin := newTestNetworkPlugin(t)
-	ds.networkPlugin = mockPlugin
+	ds.network = network.NewPluginManager(mockPlugin)
 	defer mockPlugin.Finish()
 
 	name := "foo0"
@@ -178,7 +178,7 @@ func NewDockerService(client dockertools.DockerInterface, seccompProfileRoot str
 	if err != nil {
 		return nil, fmt.Errorf("didn't find compatible CNI plugin with given settings %+v: %v", pluginSettings, err)
 	}
-	ds.networkPlugin = plug
+	ds.network = network.NewPluginManager(plug)
 	glog.Infof("Docker cri networking managed by %v", plug.Name())
 
 	// NOTE: cgroup driver is only detectable in docker 1.11+
@@ -224,7 +224,7 @@ type dockerService struct {
 	podSandboxImage string
 	streamingRuntime *streamingRuntime
 	streamingServer streaming.Server
-	networkPlugin network.NetworkPlugin
+	network *network.PluginManager
 	containerManager cm.ContainerManager
 	// cgroup driver used by Docker runtime.
 	cgroupDriver string
@@ -270,10 +270,10 @@ func (ds *dockerService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeCo
 		return
 	}
 	glog.Infof("docker cri received runtime config %+v", runtimeConfig)
-	if ds.networkPlugin != nil && runtimeConfig.NetworkConfig.PodCidr != "" {
+	if ds.network != nil && runtimeConfig.NetworkConfig.PodCidr != "" {
 		event := make(map[string]interface{})
 		event[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = runtimeConfig.NetworkConfig.PodCidr
-		ds.networkPlugin.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, event)
+		ds.network.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, event)
 	}
 	return
 }
@@ -339,7 +339,7 @@ func (ds *dockerService) Status() (*runtimeapi.RuntimeStatus, error) {
 		runtimeReady.Reason = "DockerDaemonNotReady"
 		runtimeReady.Message = fmt.Sprintf("docker: failed to get docker version: %v", err)
 	}
-	if err := ds.networkPlugin.Status(); err != nil {
+	if err := ds.network.Status(); err != nil {
 		networkReady.Status = false
 		networkReady.Reason = "NetworkPluginNotReady"
 		networkReady.Message = fmt.Sprintf("docker: network plugin is not ready: %v", err)
@@ -45,7 +45,8 @@ func newTestNetworkPlugin(t *testing.T) *nettest.MockNetworkPlugin {
 func newTestDockerService() (*dockerService, *dockertools.FakeDockerClient, *clock.FakeClock) {
 	fakeClock := clock.NewFakeClock(time.Time{})
 	c := dockertools.NewFakeDockerClient().WithClock(fakeClock).WithVersion("1.11.2", "1.23")
-	return &dockerService{client: c, os: &containertest.FakeOS{}, networkPlugin: &network.NoopNetworkPlugin{},
+	pm := network.NewPluginManager(&network.NoopNetworkPlugin{})
+	return &dockerService{client: c, os: &containertest.FakeOS{}, network: pm,
 		legacyCleanup: legacyCleanupFlag{done: 1}, checkpointHandler: NewTestPersistentCheckpointHandler()}, c, fakeClock
 }
 
@@ -95,7 +96,7 @@ func TestStatus(t *testing.T) {
 
 	// Should not report ready status is network plugin returns error.
 	mockPlugin := newTestNetworkPlugin(t)
-	ds.networkPlugin = mockPlugin
+	ds.network = network.NewPluginManager(mockPlugin)
 	defer mockPlugin.Finish()
 	mockPlugin.EXPECT().Status().Return(errors.New("network error"))
 	status, err = ds.Status()
@@ -25,11 +25,11 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
 	"k8s.io/kubernetes/pkg/kubelet/dockertools/securitycontext"
-	"k8s.io/kubernetes/pkg/kubelet/network"
+	knetwork "k8s.io/kubernetes/pkg/kubelet/network"
 )
 
 // applySandboxSecurityContext updates docker sandbox options according to security context.
-func applySandboxSecurityContext(lc *runtimeapi.LinuxPodSandboxConfig, config *dockercontainer.Config, hc *dockercontainer.HostConfig, networkPlugin network.NetworkPlugin, separator rune) {
+func applySandboxSecurityContext(lc *runtimeapi.LinuxPodSandboxConfig, config *dockercontainer.Config, hc *dockercontainer.HostConfig, network *knetwork.PluginManager, separator rune) {
 	if lc == nil {
 		return
 	}
@@ -47,8 +47,7 @@ func applySandboxSecurityContext(lc *runtimeapi.LinuxPodSandboxConfig, config *d
 
 	modifyContainerConfig(sc, config)
 	modifyHostConfig(sc, hc, separator)
-	modifySandboxNamespaceOptions(sc.GetNamespaceOptions(), hc, networkPlugin)
-
+	modifySandboxNamespaceOptions(sc.GetNamespaceOptions(), hc, network)
 }
 
 // applyContainerSecurityContext updates docker container options according to security context.
@@ -109,9 +108,9 @@ func modifyHostConfig(sc *runtimeapi.LinuxContainerSecurityContext, hostConfig *
 }
 
 // modifySandboxNamespaceOptions apply namespace options for sandbox
-func modifySandboxNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, hostConfig *dockercontainer.HostConfig, networkPlugin network.NetworkPlugin) {
+func modifySandboxNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, hostConfig *dockercontainer.HostConfig, network *knetwork.PluginManager) {
 	modifyCommonNamespaceOptions(nsOpts, hostConfig)
-	modifyHostNetworkOptionForSandbox(nsOpts.HostNetwork, networkPlugin, hostConfig)
+	modifyHostNetworkOptionForSandbox(nsOpts.HostNetwork, network, hostConfig)
 }
 
 // modifyContainerNamespaceOptions apply namespace options for container
@@ -137,18 +136,18 @@ func modifyCommonNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, hostConfig
 }
 
 // modifyHostNetworkOptionForSandbox applies NetworkMode/UTSMode to sandbox's dockercontainer.HostConfig.
-func modifyHostNetworkOptionForSandbox(hostNetwork bool, networkPlugin network.NetworkPlugin, hc *dockercontainer.HostConfig) {
+func modifyHostNetworkOptionForSandbox(hostNetwork bool, network *knetwork.PluginManager, hc *dockercontainer.HostConfig) {
 	if hostNetwork {
 		hc.NetworkMode = namespaceModeHost
 		return
 	}
 
-	if networkPlugin == nil {
+	if network == nil {
 		hc.NetworkMode = "default"
 		return
 	}
 
-	switch networkPlugin.Name() {
+	switch network.PluginName() {
 	case "cni":
 		fallthrough
 	case "kubenet":
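For orientation, here is a rough usage sketch of the new wiring, mirroring what NewDockerService and the updated tests do. It assumes the in-tree k8s.io/kubernetes packages at this revision are importable and is not part of the commit; only calls that appear in the diff above (NewPluginManager, NoopNetworkPlugin, SetUpPod, TearDownPod, PluginName) are used.

package main

import (
	"fmt"

	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/network"
)

func main() {
	// Wrap a plugin once, as NewDockerService and newTestDockerService now do.
	pm := network.NewPluginManager(&network.NoopNetworkPlugin{})

	cID := kubecontainer.ContainerID{Type: "docker", ID: "sandbox-abc123"}

	// Pod-level operations go through the manager rather than the plugin
	// itself; concurrent calls for the same pod are synchronized.
	if err := pm.SetUpPod("default", "nginx", cID); err != nil {
		fmt.Println("setup failed:", err)
	}
	if err := pm.TearDownPod("default", "nginx", cID); err != nil {
		fmt.Println("teardown failed:", err)
	}
	fmt.Println("docker cri networking managed by", pm.PluginName())
}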