From c001deed43e8b6304066b4c8ea08f3b8f2cde23a Mon Sep 17 00:00:00 2001 From: andrewsykim Date: Sat, 18 Mar 2017 22:38:38 -0400 Subject: [PATCH] fetch hostIP at runtime since status manager didn't update it yet --- pkg/kubelet/container/helpers.go | 7 +++++-- .../container/testing/fake_runtime_helper.go | 8 ++++++- pkg/kubelet/dockertools/docker_manager.go | 21 ++++++++++++------- pkg/kubelet/kubelet_pods.go | 12 +++++------ pkg/kubelet/kubelet_pods_test.go | 10 ++++----- .../kuberuntime/kuberuntime_container.go | 8 +++---- .../kuberuntime/kuberuntime_manager.go | 10 +++++++-- .../kuberuntime/kuberuntime_manager_test.go | 2 +- pkg/kubelet/rkt/rkt.go | 20 +++++++++++------- pkg/kubelet/rkt/rkt_test.go | 2 +- test/e2e/common/downward_api.go | 2 +- 11 files changed, 64 insertions(+), 38 deletions(-) diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index 4c5344be990..c36678378d3 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -21,6 +21,7 @@ import ( "fmt" "hash/adler32" "hash/fnv" + "net" "strings" "time" @@ -46,9 +47,9 @@ type HandlerRunner interface { } // RuntimeHelper wraps kubelet to make container runtime -// able to get necessary informations like the RunContainerOptions, DNS settings. +// able to get necessary information like the RunContainerOptions, DNS settings, Host IP. type RuntimeHelper interface { - GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, useClusterFirstPolicy bool, err error) + GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP, hostIP string) (contOpts *RunContainerOptions, useClusterFirstPolicy bool, err error) GetClusterDNS(pod *v1.Pod) (dnsServers []string, dnsSearches []string, useClusterFirstPolicy bool, err error) // GetPodCgroupParent returns the the CgroupName identifer, and its literal cgroupfs form on the host // of a pod. 
@@ -59,6 +60,8 @@ type RuntimeHelper interface { // supplemental groups for the Pod. These extra supplemental groups come // from annotations on persistent volumes that the pod depends on. GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 + + GetHostIP() (net.IP, error) } // ShouldContainerBeRestarted checks whether a container needs to be restarted. diff --git a/pkg/kubelet/container/testing/fake_runtime_helper.go b/pkg/kubelet/container/testing/fake_runtime_helper.go index 76bbd0a4bc3..dd788e198a1 100644 --- a/pkg/kubelet/container/testing/fake_runtime_helper.go +++ b/pkg/kubelet/container/testing/fake_runtime_helper.go @@ -17,6 +17,8 @@ limitations under the License. package testing import ( + "net" + kubetypes "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/api/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -32,7 +34,7 @@ type FakeRuntimeHelper struct { Err error } -func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, bool, error) { +func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP, hostIP string) (*kubecontainer.RunContainerOptions, bool, error) { var opts kubecontainer.RunContainerOptions if len(container.TerminationMessagePath) != 0 { opts.PodContainerDir = f.PodContainerDir @@ -60,3 +62,7 @@ func (f *FakeRuntimeHelper) GetPodDir(podUID kubetypes.UID) string { func (f *FakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 { return nil } + +func (f *FakeRuntimeHelper) GetHostIP() (net.IP, error) { + return []byte{}, nil +} diff --git a/pkg/kubelet/dockertools/docker_manager.go b/pkg/kubelet/dockertools/docker_manager.go index d89e14c12bc..0a3e24b68e0 100644 --- a/pkg/kubelet/dockertools/docker_manager.go +++ b/pkg/kubelet/dockertools/docker_manager.go @@ -1743,7 +1743,7 @@ func (dm *DockerManager) applyOOMScoreAdj(pod *v1.Pod, container *v1.Container, // Run a single 
container from a pod. Returns the docker container ID // If do not need to pass labels, just pass nil. -func (dm *DockerManager) runContainerInPod(pod *v1.Pod, container *v1.Container, netMode, ipcMode, pidMode, podIP, imageRef string, restartCount int) (kubecontainer.ContainerID, error) { +func (dm *DockerManager) runContainerInPod(pod *v1.Pod, container *v1.Container, netMode, ipcMode, pidMode, podIP, hostIP, imageRef string, restartCount int) (kubecontainer.ContainerID, error) { start := time.Now() defer func() { metrics.ContainerManagerLatency.WithLabelValues("runContainerInPod").Observe(metrics.SinceInMicroseconds(start)) @@ -1756,7 +1756,7 @@ func (dm *DockerManager) runContainerInPod(pod *v1.Pod, container *v1.Container, glog.V(5).Infof("Generating ref for container %s: %#v", container.Name, ref) } - opts, useClusterFirstPolicy, err := dm.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP) + opts, useClusterFirstPolicy, err := dm.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, hostIP) if err != nil { return kubecontainer.ContainerID{}, fmt.Errorf("GenerateRunContainerOptions: %v", err) } @@ -1993,7 +1993,8 @@ func (dm *DockerManager) createPodInfraContainer(pod *v1.Pod) (kubecontainer.Doc } // Currently we don't care about restart count of infra container, just set it to 0. 
- id, err := dm.runContainerInPod(pod, container, netNamespace, getIPCMode(pod), getPidMode(pod), "", imageRef, 0) + // We also don't care about podIP and hostIP since they're only passed in during runtime because of downward API + id, err := dm.runContainerInPod(pod, container, netNamespace, getIPCMode(pod), getPidMode(pod), "", "", imageRef, 0) if err != nil { return "", kubecontainer.ErrRunContainer, err.Error() } @@ -2269,6 +2270,12 @@ func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecon podIP = podStatus.IP } + rawHostIP, err := dm.runtimeHelper.GetHostIP() + hostIP := rawHostIP.String() + if err != nil { + glog.Errorf("Failed to get Host IP for pod: %s; %v", format.Pod(pod), err) + } + // If we should create infra container then we do it first. podInfraContainerID := containerChanges.InfraContainerId if containerChanges.StartInfraContainer && (len(containerChanges.ContainersToStart) > 0) { @@ -2369,7 +2376,7 @@ func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecon } glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod)) - if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil { + if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP, hostIP); err != nil { startContainerResult.Fail(err, msg) utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg)) return @@ -2407,7 +2414,7 @@ func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecon } glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod)) - if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil { + if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP, hostIP); err != nil { startContainerResult.Fail(err, msg) 
utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg)) continue @@ -2418,7 +2425,7 @@ func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecon // tryContainerStart attempts to pull and start the container, returning an error and a reason string if the start // was not successful. -func (dm *DockerManager) tryContainerStart(container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, namespaceMode, pidMode, podIP string) (err error, reason string) { +func (dm *DockerManager) tryContainerStart(container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, namespaceMode, pidMode, podIP, hostIP string) (err error, reason string) { imageRef, msg, err := dm.imagePuller.EnsureImageExists(pod, container, pullSecrets) if err != nil { return err, msg @@ -2445,7 +2452,7 @@ func (dm *DockerManager) tryContainerStart(container *v1.Container, pod *v1.Pod, netMode = namespaceMode } - _, err = dm.runContainerInPod(pod, container, netMode, namespaceMode, pidMode, podIP, imageRef, restartCount) + _, err = dm.runContainerInPod(pod, container, netMode, namespaceMode, pidMode, podIP, hostIP, imageRef, restartCount) if err != nil { // TODO(bburns) : Perhaps blacklist a container after N failures? return kubecontainer.ErrRunContainer, err.Error() diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 175a34e7061..f12165c946e 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -275,7 +275,7 @@ func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string { // GenerateRunContainerOptions generates the RunContainerOptions, which can be used by // the container runtime to set parameters for launching a container. 
-func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, bool, error) { +func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP, hostIP string) (*kubecontainer.RunContainerOptions, bool, error) { var err error useClusterFirstPolicy := false cgroupParent := kl.GetPodCgroupParent(pod) @@ -299,7 +299,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai if err != nil { return nil, false, err } - opts.Envs, err = kl.makeEnvironmentVariables(pod, container, podIP) + opts.Envs, err = kl.makeEnvironmentVariables(pod, container, podIP, hostIP) if err != nil { return nil, false, err } @@ -386,7 +386,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) { } // Make the environment variables for a pod in the given namespace. -func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string) ([]kubecontainer.EnvVar, error) { +func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP, hostIP string) ([]kubecontainer.EnvVar, error) { var result []kubecontainer.EnvVar // Note: These are added to the docker Config, but are not included in the checksum computed // by dockertools.BuildDockerName(...). That way, we can still determine whether an @@ -506,7 +506,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container // Step 1b: resolve alternate env var sources switch { case envVar.ValueFrom.FieldRef != nil: - runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP) + runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP, hostIP) if err != nil { return result, err } @@ -607,7 +607,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container // podFieldSelectorRuntimeValue returns the runtime value of the given // selector for a pod. 
-func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string) (string, error) { +func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP, hostIP string) (string, error) { internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "") if err != nil { return "", err @@ -618,7 +618,7 @@ func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod case "spec.serviceAccountName": return pod.Spec.ServiceAccountName, nil case "status.hostIP": - return pod.Status.HostIP, nil + return hostIP, nil case "status.podIP": return podIP, nil } diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index 56962fdee1f..b1e8af5730d 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -187,7 +187,7 @@ func TestGenerateRunContainerOptions_DNSConfigurationParams(t *testing.T) { options := make([]*kubecontainer.RunContainerOptions, 4) for i, pod := range pods { var err error - options[i], _, err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "") + options[i], _, err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "", "") if err != nil { t.Fatalf("failed to generate container options: %v", err) } @@ -220,7 +220,7 @@ func TestGenerateRunContainerOptions_DNSConfigurationParams(t *testing.T) { kubelet.resolverConfig = "/etc/resolv.conf" for i, pod := range pods { var err error - options[i], _, err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "") + options[i], _, err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "", "") if err != nil { t.Fatalf("failed to generate container options: %v", err) } @@ -1152,13 +1152,11 @@ func TestMakeEnvironmentVariables(t *testing.T) { ServiceAccountName: "special", NodeName: "node-name", }, - Status: v1.PodStatus{ - HostIP: "5.6.7.8", - }, } podIP := "1.2.3.4" + hostIP := "5.6.7.8" - result, err := 
kl.makeEnvironmentVariables(testPod, tc.container, podIP) + result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP, hostIP) select { case e := <-fakeRecorder.Events: assert.Equal(t, tc.expectedEvent, e) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index 5c519354e58..4c20baf116b 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -51,7 +51,7 @@ import ( // * create the container // * start the container // * run the post start lifecycle hooks (if applicable) -func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string) (string, error) { +func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP, hostIP string) (string, error) { // Step 1: pull the image. imageRef, msg, err := m.imagePuller.EnsureImageExists(pod, container, pullSecrets) if err != nil { @@ -72,7 +72,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb restartCount = containerStatus.RestartCount + 1 } - containerConfig, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef) + containerConfig, err := m.generateContainerConfig(container, pod, restartCount, podIP, hostIP, imageRef) if err != nil { m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToCreateContainer, "Failed to create container with error: %v", err) return "Generate Container Config Failed", err @@ -131,8 +131,8 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb } // generateContainerConfig generates container config for kubelet runtime v1. 
-func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string) (*runtimeapi.ContainerConfig, error) { - opts, _, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP) +func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, hostIP, imageRef string) (*runtimeapi.ContainerConfig, error) { + opts, _, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, hostIP) if err != nil { return nil, err } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index a09909e8c04..18c8b7a147a 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -604,6 +604,12 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat podIP = podStatus.IP } + rawHostIP, err := m.runtimeHelper.GetHostIP() + hostIP := rawHostIP.String() + if err != nil { + glog.Errorf("Failed to get Host IP for pod: %s; %v", format.Pod(pod), err) + } + // Step 4: Create a sandbox for the pod if necessary. 
podSandboxID := podContainerChanges.SandboxID if podContainerChanges.CreateSandbox && len(podContainerChanges.ContainersToStart) > 0 { @@ -680,7 +686,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat } glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod)) - if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil { + if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, hostIP); err != nil { startContainerResult.Fail(err, msg) utilruntime.HandleError(fmt.Errorf("init container start failed: %v: %s", err, msg)) return @@ -714,7 +720,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat } glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod)) - if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil { + if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, hostIP); err != nil { startContainerResult.Fail(err, msg) utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg)) continue diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go index 1b29e2422ea..8bf91ae385b 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go @@ -146,7 +146,7 @@ func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template cont sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt) assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template) - containerConfig, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image) + containerConfig, err := 
m.generateContainerConfig(template.container, template.pod, template.attempt, "", "", template.container.Image) assert.NoError(t, err, "generateContainerConfig for container template %+v", template) podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata) diff --git a/pkg/kubelet/rkt/rkt.go b/pkg/kubelet/rkt/rkt.go index 7decec4c5bc..fb3ab16054e 100644 --- a/pkg/kubelet/rkt/rkt.go +++ b/pkg/kubelet/rkt/rkt.go @@ -607,7 +607,7 @@ func setApp(imgManifest *appcschema.ImageManifest, c *v1.Container, } // makePodManifest transforms a kubelet pod spec to the rkt pod manifest. -func (r *Runtime) makePodManifest(pod *v1.Pod, podIP string, pullSecrets []v1.Secret) (*appcschema.PodManifest, error) { +func (r *Runtime) makePodManifest(pod *v1.Pod, podIP, hostIP string, pullSecrets []v1.Secret) (*appcschema.PodManifest, error) { manifest := appcschema.BlankPodManifest() ctx, cancel := context.WithTimeout(context.Background(), r.requestTimeout) @@ -654,7 +654,7 @@ func (r *Runtime) makePodManifest(pod *v1.Pod, podIP string, pullSecrets []v1.Se } for _, c := range pod.Spec.Containers { - err := r.newAppcRuntimeApp(pod, podIP, c, requiresPrivileged, pullSecrets, manifest) + err := r.newAppcRuntimeApp(pod, podIP, hostIP, c, requiresPrivileged, pullSecrets, manifest) if err != nil { return nil, err } @@ -776,7 +776,7 @@ func (r *Runtime) makeContainerLogMount(opts *kubecontainer.RunContainerOptions, return &mnt, nil } -func (r *Runtime) newAppcRuntimeApp(pod *v1.Pod, podIP string, c v1.Container, requiresPrivileged bool, pullSecrets []v1.Secret, manifest *appcschema.PodManifest) error { +func (r *Runtime) newAppcRuntimeApp(pod *v1.Pod, podIP, hostIP string, c v1.Container, requiresPrivileged bool, pullSecrets []v1.Secret, manifest *appcschema.PodManifest) error { var annotations appctypes.Annotations = []appctypes.Annotation{ { Name: *appctypes.MustACIdentifier(k8sRktContainerHashAnno), @@ -810,7 +810,7 @@ func (r *Runtime) newAppcRuntimeApp(pod *v1.Pod, podIP string, c 
v1.Container, r } // TODO: determine how this should be handled for rkt - opts, _, err := r.runtimeHelper.GenerateRunContainerOptions(pod, &c, podIP) + opts, _, err := r.runtimeHelper.GenerateRunContainerOptions(pod, &c, podIP, hostIP) if err != nil { return err } @@ -1135,9 +1135,9 @@ func constructSyslogIdentifier(generateName string, podName string) string { // // On success, it will return a string that represents name of the unit file // and the runtime pod. -func (r *Runtime) preparePod(pod *v1.Pod, podIP string, pullSecrets []v1.Secret, netnsName string) (string, *kubecontainer.Pod, error) { +func (r *Runtime) preparePod(pod *v1.Pod, podIP, hostIP string, pullSecrets []v1.Secret, netnsName string) (string, *kubecontainer.Pod, error) { // Generate the appc pod manifest from the k8s pod spec. - manifest, err := r.makePodManifest(pod, podIP, pullSecrets) + manifest, err := r.makePodManifest(pod, podIP, hostIP, pullSecrets) if err != nil { return "", nil, err } @@ -1349,7 +1349,13 @@ func (r *Runtime) RunPod(pod *v1.Pod, pullSecrets []v1.Secret) error { return err } - name, runtimePod, prepareErr := r.preparePod(pod, podIP, pullSecrets, netnsName) + rawHostIP, err := r.runtimeHelper.GetHostIP() + hostIP := rawHostIP.String() + if err != nil { + glog.Errorf("Failed to get Host IP for pod: %s; %v", format.Pod(pod), err) + } + + name, runtimePod, prepareErr := r.preparePod(pod, podIP, hostIP, pullSecrets, netnsName) // Set container references and generate events. // If preparedPod fails, then send out 'failed' events for each container. 
diff --git a/pkg/kubelet/rkt/rkt_test.go b/pkg/kubelet/rkt/rkt_test.go index d38a48c3e95..c5118f9cf0b 100644 --- a/pkg/kubelet/rkt/rkt_test.go +++ b/pkg/kubelet/rkt/rkt_test.go @@ -1902,7 +1902,7 @@ func TestMakePodManifestAnnotations(t *testing.T) { for i, testCase := range testCases { hint := fmt.Sprintf("case #%d", i) - result, err := r.makePodManifest(testCase.in, "", []v1.Secret{}) + result, err := r.makePodManifest(testCase.in, "", "", []v1.Secret{}) assert.Equal(t, testCase.outerr, err, hint) if err == nil { sort.Sort(annotationsByName(result.Annotations)) diff --git a/test/e2e/common/downward_api.go b/test/e2e/common/downward_api.go index 226a6446710..a81570d2113 100644 --- a/test/e2e/common/downward_api.go +++ b/test/e2e/common/downward_api.go @@ -62,7 +62,7 @@ var _ = framework.KubeDescribe("Downward API", func() { testDownwardAPI(f, podName, env, expectations) }) - It("should provide pod IP as an env var [Conformance]", func() { + It("should provide pod and host IP as an env var [Conformance]", func() { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ {