Mirror of https://github.com/k3s-io/kubernetes.git
Clean up host networking plugin checks for docker manager
We can save a docker inspect in podInfraContainerChanged() because it's only used within the usesHostNetwork() block. We can also consolidate some code in createPodInfraContainer() because if the pod uses the host network, no network plugin will be involved. Finally, in syncPodWithSyncResult() we can consolidate some conditionals because both hairpin setup and getting the container IP are only relevant when host networking is *not* being used.

More specifically, putting the dm.determineContainerIP() call into the !usesHostNetwork() block is OK since, if no network plugin was called to set the container up, it makes no sense to call the network plugin to retrieve the IP address that it did not handle. The CNI plugin even calls back into the docker manager's GetContainerIP(), which grabs the IP from docker and will always be "" for host-networked containers anyway.
parent 49f438bfac
commit bc62096ad5
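
Before the diff, a minimal, self-contained Go sketch of the idea behind the cleanup; it is illustrative only. The hostConfig and dockerContainer types and the sample values are stand-ins invented for this sketch (the real code uses the go-dockerclient types), while getDockerNetworkMode and namespaceModeHost correspond to identifiers that appear in the diff below.

// A minimal sketch, not the real kubelet code: the types below stand in for
// go-dockerclient's Container and HostConfig.
package main

import "fmt"

const namespaceModeHost = "host"

type hostConfig struct {
	NetworkMode string
}

type dockerContainer struct {
	HostConfig *hostConfig
}

// getDockerNetworkMode mirrors the helper added by this commit: it tolerates a
// nil HostConfig, so callers no longer repeat the nil check themselves.
func getDockerNetworkMode(container *dockerContainer) string {
	if container.HostConfig != nil {
		return container.HostConfig.NetworkMode
	}
	return ""
}

func main() {
	hostNetworked := &dockerContainer{HostConfig: &hostConfig{NetworkMode: "host"}}
	plain := &dockerContainer{}

	// Callers such as killPodWithSyncResult collapse the old two-step
	// "HostConfig != nil && NetworkMode != host" check into one expression.
	fmt.Println(getDockerNetworkMode(hostNetworked) != namespaceModeHost) // false: skip network plugin teardown
	fmt.Println(getDockerNetworkMode(plain) != namespaceModeHost)         // true: tear down via the network plugin
}
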
@@ -799,19 +799,16 @@ func (dm *DockerManager) RemoveImage(image kubecontainer.ImageSpec) error {
 
 // podInfraContainerChanged returns true if the pod infra container has changed.
 func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContainerStatus *kubecontainer.ContainerStatus) (bool, error) {
-	networkMode := ""
 	var ports []api.ContainerPort
 
+	// Check network mode.
+	if usesHostNetwork(pod) {
 	dockerPodInfraContainer, err := dm.client.InspectContainer(podInfraContainerStatus.ID.ID)
 	if err != nil {
 		return false, err
 	}
 
-	// Check network mode.
-	if dockerPodInfraContainer.HostConfig != nil {
-		networkMode = dockerPodInfraContainer.HostConfig.NetworkMode
-	}
-	if usesHostNetwork(pod) {
+		networkMode := getDockerNetworkMode(dockerPodInfraContainer)
 		if networkMode != namespaceModeHost {
 			glog.V(4).Infof("host: %v, %v", pod.Spec.SecurityContext.HostNetwork, networkMode)
 			return true, nil
@@ -843,6 +840,14 @@ func readOnlyRootFilesystem(container *api.Container) bool {
 	return container.SecurityContext != nil && container.SecurityContext.ReadOnlyRootFilesystem != nil && *container.SecurityContext.ReadOnlyRootFilesystem
 }
 
+// container must not be nil
+func getDockerNetworkMode(container *docker.Container) string {
+	if container.HostConfig != nil {
+		return container.HostConfig.NetworkMode
+	}
+	return ""
+}
+
 // dockerVersion implementes kubecontainer.Version interface by implementing
 // Compare() and String() (which is implemented by the underlying semver.Version)
 // TODO: this code is the same as rktVersion and may make sense to be moved to
@@ -1265,7 +1270,7 @@ func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecont
 		result.Fail(err)
 		return
 	}
-	if ins.HostConfig != nil && ins.HostConfig.NetworkMode != namespaceModeHost {
+	if getDockerNetworkMode(ins) != namespaceModeHost {
 		teardownNetworkResult := kubecontainer.NewSyncResult(kubecontainer.TeardownNetwork, kubecontainer.BuildPodFullName(runningPod.Name, runningPod.Namespace))
 		result.AddSyncResult(teardownNetworkResult)
 		if err := dm.networkPlugin.TearDownPod(runningPod.Namespace, runningPod.Name, kubecontainer.DockerID(networkContainer.ID.ID)); err != nil {
@@ -1585,12 +1590,10 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.Do
 	netNamespace := ""
 	var ports []api.ContainerPort
 
-	if dm.networkPlugin.Name() == "cni" || dm.networkPlugin.Name() == "kubenet" {
-		netNamespace = "none"
-	}
-
 	if usesHostNetwork(pod) {
 		netNamespace = namespaceModeHost
+	} else if dm.networkPlugin.Name() == "cni" || dm.networkPlugin.Name() == "kubenet" {
+		netNamespace = "none"
 	} else {
 		// Docker only exports ports from the pod infra container. Let's
 		// collect all of the relevant ports and export them.
@@ -1847,7 +1850,6 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
 				}
 				return
 			}
-		}
 
 		// Setup the host interface unless the pod is on the host's network (FIXME: move to networkPlugin when ready)
 		var podInfraContainer *docker.Container
@@ -1857,7 +1859,8 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
 			result.Fail(err)
 			return
 		}
-		if !usesHostNetwork(pod) && dm.configureHairpinMode {
+
+		if dm.configureHairpinMode {
 			if err = hairpin.SetUpContainer(podInfraContainer.State.Pid, network.DefaultInterfaceName); err != nil {
 				glog.Warningf("Hairpin setup failed for pod %q: %v", format.Pod(pod), err)
 			}
@@ -1867,6 +1870,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
 		// it safely via the downward API without a race and be able to use podIP in kubelet-managed /etc/hosts file.
 		pod.Status.PodIP = dm.determineContainerIP(pod.Name, pod.Namespace, podInfraContainer)
+		}
 	}
 
 	// Start everything
 	for idx := range containerChanges.ContainersToStart {
@@ -710,8 +710,6 @@ func TestSyncPodWithPodInfraCreatesContainer(t *testing.T) {
 	runSyncPod(t, dm, fakeDocker, pod, nil, false)
 
 	verifyCalls(t, fakeDocker, []string{
-		// Inspect pod infra container (but does not create)"
-		"inspect_container",
 		// Create container.
 		"create", "start", "inspect_container",
 	})
@@ -798,8 +796,6 @@ func TestSyncPodDeletesDuplicate(t *testing.T) {
 	runSyncPod(t, dm, fakeDocker, pod, nil, false)
 
 	verifyCalls(t, fakeDocker, []string{
-		// Check the pod infra container.
-		"inspect_container",
 		// Kill the duplicated container.
 		"stop",
 	})
@@ -836,8 +832,6 @@ func TestSyncPodBadHash(t *testing.T) {
 	runSyncPod(t, dm, fakeDocker, pod, nil, false)
 
 	verifyCalls(t, fakeDocker, []string{
-		// Check the pod infra container.
-		"inspect_container",
 		// Kill and restart the bad hash container.
 		"stop", "create", "start", "inspect_container",
 	})
@@ -878,8 +872,6 @@ func TestSyncPodsUnhealthy(t *testing.T) {
 	runSyncPod(t, dm, fakeDocker, pod, nil, false)
 
 	verifyCalls(t, fakeDocker, []string{
-		// Check the pod infra container.
-		"inspect_container",
 		// Kill the unhealthy container.
 		"stop",
 		// Restart the unhealthy container.
@@ -918,10 +910,7 @@ func TestSyncPodsDoesNothing(t *testing.T) {
 
 	runSyncPod(t, dm, fakeDocker, pod, nil, false)
 
-	verifyCalls(t, fakeDocker, []string{
-		// Check the pod infra container.
-		"inspect_container",
-	})
+	verifyCalls(t, fakeDocker, []string{})
 }
 
 func TestSyncPodWithRestartPolicy(t *testing.T) {
@@ -980,8 +969,6 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
 		{
 			api.RestartPolicyAlways,
 			[]string{
-				// Check the pod infra container.
-				"inspect_container",
 				// Restart both containers.
 				"create", "start", "inspect_container", "create", "start", "inspect_container",
 			},
@@ -991,8 +978,6 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
 		{
 			api.RestartPolicyOnFailure,
 			[]string{
-				// Check the pod infra container.
-				"inspect_container",
 				// Restart the failed container.
 				"create", "start", "inspect_container",
 			},
@@ -1003,7 +988,7 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
 			api.RestartPolicyNever,
 			[]string{
 				// Check the pod infra container.
-				"inspect_container", "inspect_container", "inspect_container",
+				"inspect_container", "inspect_container",
 				// Stop the last pod infra container.
 				"stop",
 			},
@@ -1077,8 +1062,8 @@ func TestSyncPodBackoff(t *testing.T) {
 		},
 	}
 
-	startCalls := []string{"inspect_container", "create", "start", "inspect_container"}
-	backOffCalls := []string{"inspect_container"}
+	startCalls := []string{"create", "start", "inspect_container"}
+	backOffCalls := []string{}
 	startResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", nil, ""}
 	backoffResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", kubecontainer.ErrCrashLoopBackOff, ""}
 	tests := []struct {
@@ -1287,8 +1272,6 @@ func TestSyncPodWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
 	runSyncPod(t, dm, fakeDocker, pod, nil, false)
 
 	verifyCalls(t, fakeDocker, []string{
-		// Check the pod infra container.
-		"inspect_container",
 		// Create container.
 		"create", "start", "inspect_container",
 	})
@@ -1339,8 +1322,6 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
 	runSyncPod(t, dm, fakeDocker, pod, nil, true)
 
 	verifyCalls(t, fakeDocker, []string{
-		// Check the pod infra container.
-		"inspect_container",
 		// Create the container.
 		"create", "start",
 		// Kill the container since event handler fails.
@@ -1461,7 +1442,7 @@ func TestSyncPodWithHostNetwork(t *testing.T) {
 
 	verifyCalls(t, fakeDocker, []string{
 		// Create pod infra container.
-		"create", "start", "inspect_container", "inspect_container",
+		"create", "start", "inspect_container",
 		// Create container.
 		"create", "start", "inspect_container",
 	})