Clean up host networking plugin checks for docker manager

We can save a docker inspect in podInfraContainerChanged() because
it's only used within the usesHostNetwork() block.  We can also
consolidate some code in createPodInfraContainer() because if
the pod uses the host network, no network plugin will be involved.

Finally, in syncPodWithSyncResult() we can consolidate some
conditionals because both hairpin setup and getting the container
IP are only relevant when host networking is *not* being used.

More specifically, putting the dm.determineContainerIP() call
into the !usesHostNetwork() block is OK since if no network plugin
was called to set the container up, it makes no sense to call
the network plugin to retrieve the IP address that it did not
handle.  The CNI plugin even calls back into the docker manager
to GetContainerIP() which grabs the IP from docker, which will
always be "" for host networked containers anyway.
This commit is contained in:
Dan Williams 2016-02-01 16:56:56 -06:00
parent 49f438bfac
commit bc62096ad5
2 changed files with 39 additions and 54 deletions

View File

@ -799,19 +799,16 @@ func (dm *DockerManager) RemoveImage(image kubecontainer.ImageSpec) error {
// podInfraContainerChanged returns true if the pod infra container has changed. // podInfraContainerChanged returns true if the pod infra container has changed.
func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContainerStatus *kubecontainer.ContainerStatus) (bool, error) { func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContainerStatus *kubecontainer.ContainerStatus) (bool, error) {
networkMode := ""
var ports []api.ContainerPort var ports []api.ContainerPort
dockerPodInfraContainer, err := dm.client.InspectContainer(podInfraContainerStatus.ID.ID)
if err != nil {
return false, err
}
// Check network mode. // Check network mode.
if dockerPodInfraContainer.HostConfig != nil {
networkMode = dockerPodInfraContainer.HostConfig.NetworkMode
}
if usesHostNetwork(pod) { if usesHostNetwork(pod) {
dockerPodInfraContainer, err := dm.client.InspectContainer(podInfraContainerStatus.ID.ID)
if err != nil {
return false, err
}
networkMode := getDockerNetworkMode(dockerPodInfraContainer)
if networkMode != namespaceModeHost { if networkMode != namespaceModeHost {
glog.V(4).Infof("host: %v, %v", pod.Spec.SecurityContext.HostNetwork, networkMode) glog.V(4).Infof("host: %v, %v", pod.Spec.SecurityContext.HostNetwork, networkMode)
return true, nil return true, nil
@ -843,6 +840,14 @@ func readOnlyRootFilesystem(container *api.Container) bool {
return container.SecurityContext != nil && container.SecurityContext.ReadOnlyRootFilesystem != nil && *container.SecurityContext.ReadOnlyRootFilesystem return container.SecurityContext != nil && container.SecurityContext.ReadOnlyRootFilesystem != nil && *container.SecurityContext.ReadOnlyRootFilesystem
} }
// getDockerNetworkMode returns the network mode recorded in the
// container's HostConfig, or the empty string when no HostConfig is
// present (e.g. for containers Docker has not fully configured).
// container must not be nil.
func getDockerNetworkMode(container *docker.Container) string {
	if hc := container.HostConfig; hc != nil {
		return hc.NetworkMode
	}
	return ""
}
// dockerVersion implementes kubecontainer.Version interface by implementing // dockerVersion implementes kubecontainer.Version interface by implementing
// Compare() and String() (which is implemented by the underlying semver.Version) // Compare() and String() (which is implemented by the underlying semver.Version)
// TODO: this code is the same as rktVersion and may make sense to be moved to // TODO: this code is the same as rktVersion and may make sense to be moved to
@ -1265,7 +1270,7 @@ func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecont
result.Fail(err) result.Fail(err)
return return
} }
if ins.HostConfig != nil && ins.HostConfig.NetworkMode != namespaceModeHost { if getDockerNetworkMode(ins) != namespaceModeHost {
teardownNetworkResult := kubecontainer.NewSyncResult(kubecontainer.TeardownNetwork, kubecontainer.BuildPodFullName(runningPod.Name, runningPod.Namespace)) teardownNetworkResult := kubecontainer.NewSyncResult(kubecontainer.TeardownNetwork, kubecontainer.BuildPodFullName(runningPod.Name, runningPod.Namespace))
result.AddSyncResult(teardownNetworkResult) result.AddSyncResult(teardownNetworkResult)
if err := dm.networkPlugin.TearDownPod(runningPod.Namespace, runningPod.Name, kubecontainer.DockerID(networkContainer.ID.ID)); err != nil { if err := dm.networkPlugin.TearDownPod(runningPod.Namespace, runningPod.Name, kubecontainer.DockerID(networkContainer.ID.ID)); err != nil {
@ -1585,12 +1590,10 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.Do
netNamespace := "" netNamespace := ""
var ports []api.ContainerPort var ports []api.ContainerPort
if dm.networkPlugin.Name() == "cni" || dm.networkPlugin.Name() == "kubenet" {
netNamespace = "none"
}
if usesHostNetwork(pod) { if usesHostNetwork(pod) {
netNamespace = namespaceModeHost netNamespace = namespaceModeHost
} else if dm.networkPlugin.Name() == "cni" || dm.networkPlugin.Name() == "kubenet" {
netNamespace = "none"
} else { } else {
// Docker only exports ports from the pod infra container. Let's // Docker only exports ports from the pod infra container. Let's
// collect all of the relevant ports and export them. // collect all of the relevant ports and export them.
@ -1847,25 +1850,26 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
} }
return return
} }
}
// Setup the host interface unless the pod is on the host's network (FIXME: move to networkPlugin when ready) // Setup the host interface unless the pod is on the host's network (FIXME: move to networkPlugin when ready)
var podInfraContainer *docker.Container var podInfraContainer *docker.Container
podInfraContainer, err = dm.client.InspectContainer(string(podInfraContainerID)) podInfraContainer, err = dm.client.InspectContainer(string(podInfraContainerID))
if err != nil { if err != nil {
glog.Errorf("Failed to inspect pod infra container: %v; Skipping pod %q", err, format.Pod(pod)) glog.Errorf("Failed to inspect pod infra container: %v; Skipping pod %q", err, format.Pod(pod))
result.Fail(err) result.Fail(err)
return return
}
if !usesHostNetwork(pod) && dm.configureHairpinMode {
if err = hairpin.SetUpContainer(podInfraContainer.State.Pid, network.DefaultInterfaceName); err != nil {
glog.Warningf("Hairpin setup failed for pod %q: %v", format.Pod(pod), err)
} }
}
// Find the pod IP after starting the infra container in order to expose if dm.configureHairpinMode {
// it safely via the downward API without a race and be able to use podIP in kubelet-managed /etc/hosts file. if err = hairpin.SetUpContainer(podInfraContainer.State.Pid, network.DefaultInterfaceName); err != nil {
pod.Status.PodIP = dm.determineContainerIP(pod.Name, pod.Namespace, podInfraContainer) glog.Warningf("Hairpin setup failed for pod %q: %v", format.Pod(pod), err)
}
}
// Find the pod IP after starting the infra container in order to expose
// it safely via the downward API without a race and be able to use podIP in kubelet-managed /etc/hosts file.
pod.Status.PodIP = dm.determineContainerIP(pod.Name, pod.Namespace, podInfraContainer)
}
} }
// Start everything // Start everything

View File

@ -710,8 +710,6 @@ func TestSyncPodWithPodInfraCreatesContainer(t *testing.T) {
runSyncPod(t, dm, fakeDocker, pod, nil, false) runSyncPod(t, dm, fakeDocker, pod, nil, false)
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
// Inspect pod infra container (but does not create)"
"inspect_container",
// Create container. // Create container.
"create", "start", "inspect_container", "create", "start", "inspect_container",
}) })
@ -798,8 +796,6 @@ func TestSyncPodDeletesDuplicate(t *testing.T) {
runSyncPod(t, dm, fakeDocker, pod, nil, false) runSyncPod(t, dm, fakeDocker, pod, nil, false)
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
// Check the pod infra container.
"inspect_container",
// Kill the duplicated container. // Kill the duplicated container.
"stop", "stop",
}) })
@ -836,8 +832,6 @@ func TestSyncPodBadHash(t *testing.T) {
runSyncPod(t, dm, fakeDocker, pod, nil, false) runSyncPod(t, dm, fakeDocker, pod, nil, false)
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
// Check the pod infra container.
"inspect_container",
// Kill and restart the bad hash container. // Kill and restart the bad hash container.
"stop", "create", "start", "inspect_container", "stop", "create", "start", "inspect_container",
}) })
@ -878,8 +872,6 @@ func TestSyncPodsUnhealthy(t *testing.T) {
runSyncPod(t, dm, fakeDocker, pod, nil, false) runSyncPod(t, dm, fakeDocker, pod, nil, false)
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
// Check the pod infra container.
"inspect_container",
// Kill the unhealthy container. // Kill the unhealthy container.
"stop", "stop",
// Restart the unhealthy container. // Restart the unhealthy container.
@ -918,10 +910,7 @@ func TestSyncPodsDoesNothing(t *testing.T) {
runSyncPod(t, dm, fakeDocker, pod, nil, false) runSyncPod(t, dm, fakeDocker, pod, nil, false)
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{})
// Check the pod infra container.
"inspect_container",
})
} }
func TestSyncPodWithRestartPolicy(t *testing.T) { func TestSyncPodWithRestartPolicy(t *testing.T) {
@ -980,8 +969,6 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
{ {
api.RestartPolicyAlways, api.RestartPolicyAlways,
[]string{ []string{
// Check the pod infra container.
"inspect_container",
// Restart both containers. // Restart both containers.
"create", "start", "inspect_container", "create", "start", "inspect_container", "create", "start", "inspect_container", "create", "start", "inspect_container",
}, },
@ -991,8 +978,6 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
{ {
api.RestartPolicyOnFailure, api.RestartPolicyOnFailure,
[]string{ []string{
// Check the pod infra container.
"inspect_container",
// Restart the failed container. // Restart the failed container.
"create", "start", "inspect_container", "create", "start", "inspect_container",
}, },
@ -1003,7 +988,7 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
api.RestartPolicyNever, api.RestartPolicyNever,
[]string{ []string{
// Check the pod infra container. // Check the pod infra container.
"inspect_container", "inspect_container", "inspect_container", "inspect_container", "inspect_container",
// Stop the last pod infra container. // Stop the last pod infra container.
"stop", "stop",
}, },
@ -1077,8 +1062,8 @@ func TestSyncPodBackoff(t *testing.T) {
}, },
} }
startCalls := []string{"inspect_container", "create", "start", "inspect_container"} startCalls := []string{"create", "start", "inspect_container"}
backOffCalls := []string{"inspect_container"} backOffCalls := []string{}
startResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", nil, ""} startResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", nil, ""}
backoffResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", kubecontainer.ErrCrashLoopBackOff, ""} backoffResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", kubecontainer.ErrCrashLoopBackOff, ""}
tests := []struct { tests := []struct {
@ -1287,8 +1272,6 @@ func TestSyncPodWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
runSyncPod(t, dm, fakeDocker, pod, nil, false) runSyncPod(t, dm, fakeDocker, pod, nil, false)
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
// Check the pod infra container.
"inspect_container",
// Create container. // Create container.
"create", "start", "inspect_container", "create", "start", "inspect_container",
}) })
@ -1339,8 +1322,6 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
runSyncPod(t, dm, fakeDocker, pod, nil, true) runSyncPod(t, dm, fakeDocker, pod, nil, true)
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
// Check the pod infra container.
"inspect_container",
// Create the container. // Create the container.
"create", "start", "create", "start",
// Kill the container since event handler fails. // Kill the container since event handler fails.
@ -1461,7 +1442,7 @@ func TestSyncPodWithHostNetwork(t *testing.T) {
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
// Create pod infra container. // Create pod infra container.
"create", "start", "inspect_container", "inspect_container", "create", "start", "inspect_container",
// Create container. // Create container.
"create", "start", "inspect_container", "create", "start", "inspect_container",
}) })