Merge pull request #20578 from dcbw/dont-cni-hostnet-pods

Auto commit by PR queue bot
k8s-merge-robot 2016-03-05 09:03:12 -08:00
commit f359bd96a7
2 changed files with 39 additions and 54 deletions

pkg/kubelet/dockertools/manager.go

@@ -791,19 +791,16 @@ func (dm *DockerManager) RemoveImage(image kubecontainer.ImageSpec) error {
 // podInfraContainerChanged returns true if the pod infra container has changed.
 func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContainerStatus *kubecontainer.ContainerStatus) (bool, error) {
-    networkMode := ""
     var ports []api.ContainerPort
+    // Check network mode.
+    if usesHostNetwork(pod) {
         dockerPodInfraContainer, err := dm.client.InspectContainer(podInfraContainerStatus.ID.ID)
         if err != nil {
             return false, err
         }
-    // Check network mode.
-    if dockerPodInfraContainer.HostConfig != nil {
-        networkMode = dockerPodInfraContainer.HostConfig.NetworkMode
-    }
-    if usesHostNetwork(pod) {
+        networkMode := getDockerNetworkMode(dockerPodInfraContainer)
         if networkMode != namespaceModeHost {
             glog.V(4).Infof("host: %v, %v", pod.Spec.SecurityContext.HostNetwork, networkMode)
             return true, nil
@@ -835,6 +832,14 @@ func readOnlyRootFilesystem(container *api.Container) bool {
     return container.SecurityContext != nil && container.SecurityContext.ReadOnlyRootFilesystem != nil && *container.SecurityContext.ReadOnlyRootFilesystem
 }

+// container must not be nil
+func getDockerNetworkMode(container *docker.Container) string {
+    if container.HostConfig != nil {
+        return container.HostConfig.NetworkMode
+    }
+    return ""
+}
+
 // dockerVersion implementes kubecontainer.Version interface by implementing
 // Compare() and String() (which is implemented by the underlying semver.Version)
 // TODO: this code is the same as rktVersion and may make sense to be moved to
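The new getDockerNetworkMode helper replaces the inline nil checks on HostConfig (here and in killPodWithSyncResult below) with a single guarded accessor. A minimal runnable sketch of the pattern, using simplified stand-in types rather than the real go-dockerclient definitions:

    package main

    import "fmt"

    // Stand-ins for the go-dockerclient types referenced in the diff; the
    // real structs carry many more fields.
    type HostConfig struct{ NetworkMode string }
    type Container struct{ HostConfig *HostConfig }

    const namespaceModeHost = "host"

    // getDockerNetworkMode returns the container's network mode, or "" when
    // HostConfig is nil — the case each caller previously had to remember.
    func getDockerNetworkMode(container *Container) string {
        if container.HostConfig != nil {
            return container.HostConfig.NetworkMode
        }
        return ""
    }

    func main() {
        hostNet := &Container{HostConfig: &HostConfig{NetworkMode: "host"}}
        bare := &Container{} // nil HostConfig

        fmt.Println(getDockerNetworkMode(hostNet) == namespaceModeHost) // true
        fmt.Println(getDockerNetworkMode(bare))                         // "" (safe default)
    }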
@@ -1257,7 +1262,7 @@ func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecont
             result.Fail(err)
             return
         }
-        if ins.HostConfig != nil && ins.HostConfig.NetworkMode != namespaceModeHost {
+        if getDockerNetworkMode(ins) != namespaceModeHost {
             teardownNetworkResult := kubecontainer.NewSyncResult(kubecontainer.TeardownNetwork, kubecontainer.BuildPodFullName(runningPod.Name, runningPod.Namespace))
             result.AddSyncResult(teardownNetworkResult)
             if err := dm.networkPlugin.TearDownPod(runningPod.Namespace, runningPod.Name, kubecontainer.DockerID(networkContainer.ID.ID)); err != nil {
@@ -1577,12 +1582,10 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.Do
     netNamespace := ""
     var ports []api.ContainerPort

-    if dm.networkPlugin.Name() == "cni" || dm.networkPlugin.Name() == "kubenet" {
-        netNamespace = "none"
-    }
-
     if usesHostNetwork(pod) {
         netNamespace = namespaceModeHost
+    } else if dm.networkPlugin.Name() == "cni" || dm.networkPlugin.Name() == "kubenet" {
+        netNamespace = "none"
     } else {
         // Docker only exports ports from the pod infra container. Let's
         // collect all of the relevant ports and export them.
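Reordering these branches makes the precedence explicit: host networking first, then the plugin-managed "none" namespace, then Docker's default. A hedged sketch of the selection order (selectNetNamespace is a hypothetical helper written for illustration, not a function in the tree):

    package main

    import "fmt"

    const namespaceModeHost = "host"

    // selectNetNamespace mirrors the branch order in createPodInfraContainer
    // after this change.
    func selectNetNamespace(hostNetwork bool, pluginName string) string {
        if hostNetwork {
            // Host-network pods always share the host's namespace,
            // regardless of which network plugin is configured.
            return namespaceModeHost
        }
        if pluginName == "cni" || pluginName == "kubenet" {
            // The plugin attaches the pod network itself, so the infra
            // container starts with no Docker-managed network.
            return "none"
        }
        // Docker's default networking; ports are exported from the infra container.
        return ""
    }

    func main() {
        fmt.Println(selectNetNamespace(true, "cni"))   // host — the plugin is bypassed entirely
        fmt.Println(selectNetNamespace(false, "cni"))  // none
        fmt.Println(selectNetNamespace(false, "exec")) // "" (default bridge, with exported ports)
    }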
@@ -1839,7 +1842,6 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
             }
             return
         }
-    }

         // Setup the host interface unless the pod is on the host's network (FIXME: move to networkPlugin when ready)
         var podInfraContainer *docker.Container
@@ -1849,7 +1851,8 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
             result.Fail(err)
             return
         }
-        if !usesHostNetwork(pod) && dm.configureHairpinMode {
+
+        if dm.configureHairpinMode {
             if err = hairpin.SetUpContainer(podInfraContainer.State.Pid, network.DefaultInterfaceName); err != nil {
                 glog.Warningf("Hairpin setup failed for pod %q: %v", format.Pod(pod), err)
             }
@@ -1859,6 +1862,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
         // it safely via the downward API without a race and be able to use podIP in kubelet-managed /etc/hosts file.
         pod.Status.PodIP = dm.determineContainerIP(pod.Name, pod.Namespace, podInfraContainer)
     }
+    }

     // Start everything
     for idx := range containerChanges.ContainersToStart {
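Together, the three SyncPod hunks move every network-plugin step — SetUpPod, hairpin configuration, and PodIP discovery — under one !usesHostNetwork guard, which is the point of the PR: pods on the host's network get no CNI setup, and (per the killPodWithSyncResult hunk above) no teardown either. A toy sketch of the resulting control flow, with hypothetical stand-ins for the kubelet pieces:

    package main

    import "fmt"

    // pod and usesHostNetwork are simplified stand-ins for api.Pod and the
    // helper of the same name in manager.go.
    type pod struct{ hostNetwork bool }

    func usesHostNetwork(p *pod) bool { return p.hostNetwork }

    // networkSteps lists which sync-time network actions run for a pod; it
    // illustrates the guard's shape, it is not code from the tree.
    func networkSteps(p *pod) []string {
        var steps []string
        if !usesHostNetwork(p) {
            steps = append(steps,
                "SetUpPod",               // CNI/kubenet attach
                "hairpin.SetUpContainer", // hairpin mode on the infra container
                "determineContainerIP",   // fills pod.Status.PodIP
            )
        }
        return steps
    }

    func main() {
        fmt.Println(networkSteps(&pod{hostNetwork: false})) // all three steps
        fmt.Println(networkSteps(&pod{hostNetwork: true}))  // [] — and nothing to tear down later
    }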

pkg/kubelet/dockertools/manager_test.go

@@ -710,8 +710,6 @@ func TestSyncPodWithPodInfraCreatesContainer(t *testing.T) {
     runSyncPod(t, dm, fakeDocker, pod, nil, false)

     verifyCalls(t, fakeDocker, []string{
-        // Inspect pod infra container (but does not create)"
-        "inspect_container",
         // Create container.
         "create", "start", "inspect_container",
     })
@@ -798,8 +796,6 @@ func TestSyncPodDeletesDuplicate(t *testing.T) {
     runSyncPod(t, dm, fakeDocker, pod, nil, false)

     verifyCalls(t, fakeDocker, []string{
-        // Check the pod infra container.
-        "inspect_container",
         // Kill the duplicated container.
         "stop",
     })
@@ -836,8 +832,6 @@ func TestSyncPodBadHash(t *testing.T) {
     runSyncPod(t, dm, fakeDocker, pod, nil, false)

     verifyCalls(t, fakeDocker, []string{
-        // Check the pod infra container.
-        "inspect_container",
         // Kill and restart the bad hash container.
         "stop", "create", "start", "inspect_container",
     })
@@ -878,8 +872,6 @@ func TestSyncPodsUnhealthy(t *testing.T) {
     runSyncPod(t, dm, fakeDocker, pod, nil, false)

     verifyCalls(t, fakeDocker, []string{
-        // Check the pod infra container.
-        "inspect_container",
         // Kill the unhealthy container.
         "stop",
         // Restart the unhealthy container.
@@ -918,10 +910,7 @@ func TestSyncPodsDoesNothing(t *testing.T) {
     runSyncPod(t, dm, fakeDocker, pod, nil, false)

-    verifyCalls(t, fakeDocker, []string{
-        // Check the pod infra container.
-        "inspect_container",
-    })
+    verifyCalls(t, fakeDocker, []string{})
 }

 func TestSyncPodWithRestartPolicy(t *testing.T) {
@@ -980,8 +969,6 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
         {
             api.RestartPolicyAlways,
             []string{
-                // Check the pod infra container.
-                "inspect_container",
                 // Restart both containers.
                 "create", "start", "inspect_container", "create", "start", "inspect_container",
             },
@@ -991,8 +978,6 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
         {
             api.RestartPolicyOnFailure,
             []string{
-                // Check the pod infra container.
-                "inspect_container",
                 // Restart the failed container.
                 "create", "start", "inspect_container",
             },
@@ -1003,7 +988,7 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
             api.RestartPolicyNever,
             []string{
                 // Check the pod infra container.
-                "inspect_container", "inspect_container", "inspect_container",
+                "inspect_container", "inspect_container",
                 // Stop the last pod infra container.
                 "stop",
             },
@@ -1077,8 +1062,8 @@ func TestSyncPodBackoff(t *testing.T) {
         },
     }

-    startCalls := []string{"inspect_container", "create", "start", "inspect_container"}
-    backOffCalls := []string{"inspect_container"}
+    startCalls := []string{"create", "start", "inspect_container"}
+    backOffCalls := []string{}
     startResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", nil, ""}
     backoffResult := &kubecontainer.SyncResult{kubecontainer.StartContainer, "bad", kubecontainer.ErrCrashLoopBackOff, ""}
     tests := []struct {
@@ -1287,8 +1272,6 @@ func TestSyncPodWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
     runSyncPod(t, dm, fakeDocker, pod, nil, false)

     verifyCalls(t, fakeDocker, []string{
-        // Check the pod infra container.
-        "inspect_container",
         // Create container.
         "create", "start", "inspect_container",
     })
@@ -1339,8 +1322,6 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
     runSyncPod(t, dm, fakeDocker, pod, nil, true)

     verifyCalls(t, fakeDocker, []string{
-        // Check the pod infra container.
-        "inspect_container",
         // Create the container.
         "create", "start",
         // Kill the container since event handler fails.
@@ -1461,7 +1442,7 @@ func TestSyncPodWithHostNetwork(t *testing.T) {
     verifyCalls(t, fakeDocker, []string{
         // Create pod infra container.
-        "create", "start", "inspect_container", "inspect_container",
+        "create", "start", "inspect_container",
         // Create container.
         "create", "start", "inspect_container",
     })