From 4f6c1b5ad5d190a978a4932a4f7926ffe043f272 Mon Sep 17 00:00:00 2001
From: andrewsykim
Date: Tue, 28 Mar 2017 12:07:17 -0400
Subject: [PATCH] call GetHostIP from makeEnvironment

---
 api/swagger-spec/batch_v2alpha1.json                |  2 +-
 pkg/kubelet/container/helpers.go                    |  5 +---
 .../container/testing/fake_runtime_helper.go        |  8 +-----
 pkg/kubelet/dockertools/docker_manager.go           | 21 +++++----------
 pkg/kubelet/kubelet_pods.go                         | 17 +++++++-----
 pkg/kubelet/kubelet_pods_test.go                    |  9 +++----
 pkg/kubelet/kubelet_test.go                         | 27 ++++++++++++++++++-
 .../kuberuntime/kuberuntime_container.go            |  8 +++---
 .../kuberuntime/kuberuntime_manager.go              | 10 ++-----
 .../kuberuntime/kuberuntime_manager_test.go         |  2 +-
 pkg/kubelet/rkt/rkt.go                              | 20 +++++---------
 pkg/kubelet/rkt/rkt_test.go                         |  2 +-
 12 files changed, 66 insertions(+), 65 deletions(-)

diff --git a/api/swagger-spec/batch_v2alpha1.json b/api/swagger-spec/batch_v2alpha1.json
index 0d5238e2459..7cbb81e9e60 100644
--- a/api/swagger-spec/batch_v2alpha1.json
+++ b/api/swagger-spec/batch_v2alpha1.json
@@ -3584,7 +3584,7 @@
     "properties": {
      "fieldRef": {
       "$ref": "v1.ObjectFieldSelector",
-      "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.podIP."
+      "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP."
      },
      "resourceFieldRef": {
       "$ref": "v1.ResourceFieldSelector",
diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go
index c36678378d3..3dfecdecbdc 100644
--- a/pkg/kubelet/container/helpers.go
+++ b/pkg/kubelet/container/helpers.go
@@ -21,7 +21,6 @@ import (
     "fmt"
     "hash/adler32"
     "hash/fnv"
-    "net"
     "strings"
     "time"
 
@@ -49,7 +48,7 @@ type HandlerRunner interface {
 // RuntimeHelper wraps kubelet to make container runtime
 // able to get necessary informations like the RunContainerOptions, DNS settings, Host IP.
 type RuntimeHelper interface {
-    GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP, hostIP string) (contOpts *RunContainerOptions, useClusterFirstPolicy bool, err error)
+    GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, useClusterFirstPolicy bool, err error)
     GetClusterDNS(pod *v1.Pod) (dnsServers []string, dnsSearches []string, useClusterFirstPolicy bool, err error)
     // GetPodCgroupParent returns the the CgroupName identifer, and its literal cgroupfs form on the host
     // of a pod.
@@ -60,8 +59,6 @@ type RuntimeHelper interface {
     // supplemental groups for the Pod. These extra supplemental groups come
     // from annotations on persistent volumes that the pod depends on.
     GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64
-
-    GetHostIP() (net.IP, error)
 }
 
 // ShouldContainerBeRestarted checks whether a container needs to be restarted.
diff --git a/pkg/kubelet/container/testing/fake_runtime_helper.go b/pkg/kubelet/container/testing/fake_runtime_helper.go
index dd788e198a1..76bbd0a4bc3 100644
--- a/pkg/kubelet/container/testing/fake_runtime_helper.go
+++ b/pkg/kubelet/container/testing/fake_runtime_helper.go
@@ -17,8 +17,6 @@ limitations under the License.
 package testing
 
 import (
-    "net"
-
     kubetypes "k8s.io/apimachinery/pkg/types"
     "k8s.io/kubernetes/pkg/api/v1"
     kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -34,7 +32,7 @@ type FakeRuntimeHelper struct {
     Err error
 }
 
-func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP, hostIP string) (*kubecontainer.RunContainerOptions, bool, error) {
+func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, bool, error) {
     var opts kubecontainer.RunContainerOptions
     if len(container.TerminationMessagePath) != 0 {
         opts.PodContainerDir = f.PodContainerDir
@@ -62,7 +60,3 @@ func (f *FakeRuntimeHelper) GetPodDir(podUID kubetypes.UID) string {
 func (f *FakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
     return nil
 }
-
-func (f *FakeRuntimeHelper) GetHostIP() (net.IP, error) {
-    return []byte{}, nil
-}
diff --git a/pkg/kubelet/dockertools/docker_manager.go b/pkg/kubelet/dockertools/docker_manager.go
index 0a3e24b68e0..d89e14c12bc 100644
--- a/pkg/kubelet/dockertools/docker_manager.go
+++ b/pkg/kubelet/dockertools/docker_manager.go
@@ -1743,7 +1743,7 @@ func (dm *DockerManager) applyOOMScoreAdj(pod *v1.Pod, container *v1.Container,
 
 // Run a single container from a pod. Returns the docker container ID
 // If do not need to pass labels, just pass nil.
-func (dm *DockerManager) runContainerInPod(pod *v1.Pod, container *v1.Container, netMode, ipcMode, pidMode, podIP, hostIP, imageRef string, restartCount int) (kubecontainer.ContainerID, error) {
+func (dm *DockerManager) runContainerInPod(pod *v1.Pod, container *v1.Container, netMode, ipcMode, pidMode, podIP, imageRef string, restartCount int) (kubecontainer.ContainerID, error) {
     start := time.Now()
     defer func() {
         metrics.ContainerManagerLatency.WithLabelValues("runContainerInPod").Observe(metrics.SinceInMicroseconds(start))
@@ -1756,7 +1756,7 @@ func (dm *DockerManager) runContainerInPod(pod *v1.Pod, container *v1.Container,
         glog.V(5).Infof("Generating ref for container %s: %#v", container.Name, ref)
     }
 
-    opts, useClusterFirstPolicy, err := dm.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, hostIP)
+    opts, useClusterFirstPolicy, err := dm.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
     if err != nil {
         return kubecontainer.ContainerID{}, fmt.Errorf("GenerateRunContainerOptions: %v", err)
     }
@@ -1993,8 +1993,7 @@ func (dm *DockerManager) createPodInfraContainer(pod *v1.Pod) (kubecontainer.Doc
     }
 
     // Currently we don't care about restart count of infra container, just set it to 0.
-    // We also don't care about podIP and hostIP since their only passed in during runtime because of downward API
-    id, err := dm.runContainerInPod(pod, container, netNamespace, getIPCMode(pod), getPidMode(pod), "", "", imageRef, 0)
+    id, err := dm.runContainerInPod(pod, container, netNamespace, getIPCMode(pod), getPidMode(pod), "", imageRef, 0)
     if err != nil {
         return "", kubecontainer.ErrRunContainer, err.Error()
     }
@@ -2270,12 +2269,6 @@ func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecon
         podIP = podStatus.IP
     }
 
-    rawHostIP, err := dm.runtimeHelper.GetHostIP()
-    hostIP := rawHostIP.String()
-    if err != nil {
-        glog.Errorf("Failed to get Host IP for pod: %s; %v", format.Pod(pod), err)
-    }
-
     // If we should create infra container then we do it first.
     podInfraContainerID := containerChanges.InfraContainerId
     if containerChanges.StartInfraContainer && (len(containerChanges.ContainersToStart) > 0) {
@@ -2376,7 +2369,7 @@ func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecon
         }
 
         glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod))
-        if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP, hostIP); err != nil {
+        if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil {
             startContainerResult.Fail(err, msg)
             utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
             return
@@ -2414,7 +2407,7 @@ func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecon
         }
 
         glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
-        if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP, hostIP); err != nil {
+        if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil {
             startContainerResult.Fail(err, msg)
             utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
             continue
@@ -2425,7 +2418,7 @@ func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecon
 }
 // tryContainerStart attempts to pull and start the container, returning an error and a reason string if the start
 // was not successful.
-func (dm *DockerManager) tryContainerStart(container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, namespaceMode, pidMode, podIP, hostIP string) (err error, reason string) {
+func (dm *DockerManager) tryContainerStart(container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, namespaceMode, pidMode, podIP string) (err error, reason string) {
     imageRef, msg, err := dm.imagePuller.EnsureImageExists(pod, container, pullSecrets)
     if err != nil {
         return err, msg
@@ -2452,7 +2445,7 @@ func (dm *DockerManager) tryContainerStart(container *v1.Container, pod *v1.Pod,
         netMode = namespaceMode
     }
 
-    _, err = dm.runContainerInPod(pod, container, netMode, namespaceMode, pidMode, podIP, hostIP, imageRef, restartCount)
+    _, err = dm.runContainerInPod(pod, container, netMode, namespaceMode, pidMode, podIP, imageRef, restartCount)
     if err != nil {
         // TODO(bburns) : Perhaps blacklist a container after N failures?
         return kubecontainer.ErrRunContainer, err.Error()
diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go
index f12165c946e..d05644f1e95 100644
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -275,7 +275,7 @@ func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string {
 
 // GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
 // the container runtime to set parameters for launching a container.
-func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP, hostIP string) (*kubecontainer.RunContainerOptions, bool, error) {
+func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, bool, error) {
     var err error
     useClusterFirstPolicy := false
     cgroupParent := kl.GetPodCgroupParent(pod)
@@ -299,7 +299,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
     if err != nil {
         return nil, false, err
     }
-    opts.Envs, err = kl.makeEnvironmentVariables(pod, container, podIP, hostIP)
+    opts.Envs, err = kl.makeEnvironmentVariables(pod, container, podIP)
     if err != nil {
         return nil, false, err
     }
@@ -386,7 +386,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
 }
 
 // Make the environment variables for a pod in the given namespace.
-func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP, hostIP string) ([]kubecontainer.EnvVar, error) {
+func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string) ([]kubecontainer.EnvVar, error) {
     var result []kubecontainer.EnvVar
     // Note: These are added to the docker Config, but are not included in the checksum computed
     // by dockertools.BuildDockerName(...). That way, we can still determine whether an
@@ -506,7 +506,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
             // Step 1b: resolve alternate env var sources
             switch {
             case envVar.ValueFrom.FieldRef != nil:
-                runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP, hostIP)
+                runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP)
                 if err != nil {
                     return result, err
                 }
@@ -607,7 +607,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
 
 // podFieldSelectorRuntimeValue returns the runtime value of the given
 // selector for a pod.
-func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP, hostIP string) (string, error) {
+func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string) (string, error) {
     internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "")
     if err != nil {
         return "", err
@@ -618,7 +618,12 @@ func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod
     case "spec.serviceAccountName":
         return pod.Spec.ServiceAccountName, nil
     case "status.hostIP":
-        return hostIP, nil
+        hostIP, err := kl.GetHostIP()
+        if err != nil {
+            return "", err
+        }
+
+        return hostIP.String(), nil
     case "status.podIP":
         return podIP, nil
     }
diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go
index b1e8af5730d..85ebd38f991 100644
--- a/pkg/kubelet/kubelet_pods_test.go
+++ b/pkg/kubelet/kubelet_pods_test.go
@@ -187,7 +187,7 @@ func TestGenerateRunContainerOptions_DNSConfigurationParams(t *testing.T) {
     options := make([]*kubecontainer.RunContainerOptions, 4)
     for i, pod := range pods {
         var err error
-        options[i], _, err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "", "")
+        options[i], _, err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "")
         if err != nil {
             t.Fatalf("failed to generate container options: %v", err)
         }
@@ -220,7 +220,7 @@ func TestGenerateRunContainerOptions_DNSConfigurationParams(t *testing.T) {
     kubelet.resolverConfig = "/etc/resolv.conf"
     for i, pod := range pods {
         var err error
-        options[i], _, err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "", "")
+        options[i], _, err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "")
         if err != nil {
             t.Fatalf("failed to generate container options: %v", err)
         }
@@ -521,7 +521,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
                 {Name: "POD_NODE_NAME", Value: "node-name"},
                 {Name: "POD_SERVICE_ACCOUNT_NAME", Value: "special"},
                 {Name: "POD_IP", Value: "1.2.3.4"},
-                {Name: "HOST_IP", Value: "5.6.7.8"},
+                {Name: "HOST_IP", Value: testKubeletHostIP},
             },
         },
         {
@@ -1154,9 +1154,8 @@ func TestMakeEnvironmentVariables(t *testing.T) {
         },
     }
     podIP := "1.2.3.4"
-    hostIP := "5.6.7.8"
 
-    result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP, hostIP)
+    result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP)
     select {
     case e := <-fakeRecorder.Events:
         assert.Equal(t, tc.expectedEvent, e)
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index 6dfb43e8c74..721565e4063 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -79,6 +79,7 @@ func init() {
 
 const (
     testKubeletHostname = "127.0.0.1"
+    testKubeletHostIP = "127.0.0.1"
 
     testReservationCPU = "200m"
     testReservationMemory = "100M"
@@ -166,7 +167,31 @@ func newTestKubeletWithImageList(
     kubelet.masterServiceNamespace = metav1.NamespaceDefault
     kubelet.serviceLister = testServiceLister{}
     kubelet.nodeLister = testNodeLister{}
-    kubelet.nodeInfo = testNodeInfo{}
+    kubelet.nodeInfo = testNodeInfo{
+        nodes: []*v1.Node{
+            {
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: string(kubelet.nodeName),
+                },
+                Status: v1.NodeStatus{
+                    Conditions: []v1.NodeCondition{
+                        {
+                            Type: v1.NodeReady,
+                            Status: v1.ConditionTrue,
+                            Reason: "Ready",
+                            Message: "Node ready",
+                        },
+                    },
+                    Addresses: []v1.NodeAddress{
+                        {
+                            Type: v1.NodeInternalIP,
+                            Address: testKubeletHostIP,
+                        },
+                    },
+                },
+            },
+        },
+    }
     kubelet.recorder = fakeRecorder
     if err := kubelet.setupDataDirs(); err != nil {
         t.Fatalf("can't initialize kubelet data dirs: %v", err)
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go
index 4c20baf116b..5c519354e58 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_container.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go
@@ -51,7 +51,7 @@ import (
 // * create the container
 // * start the container
 // * run the post start lifecycle hooks (if applicable)
-func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP, hostIP string) (string, error) {
+func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string) (string, error) {
     // Step 1: pull the image.
     imageRef, msg, err := m.imagePuller.EnsureImageExists(pod, container, pullSecrets)
     if err != nil {
@@ -72,7 +72,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
         restartCount = containerStatus.RestartCount + 1
     }
 
-    containerConfig, err := m.generateContainerConfig(container, pod, restartCount, podIP, hostIP, imageRef)
+    containerConfig, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef)
     if err != nil {
         m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToCreateContainer, "Failed to create container with error: %v", err)
         return "Generate Container Config Failed", err
@@ -131,8 +131,8 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
 }
 
 // generateContainerConfig generates container config for kubelet runtime v1.
-func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, hostIP, imageRef string) (*runtimeapi.ContainerConfig, error) {
-    opts, _, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, hostIP)
+func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string) (*runtimeapi.ContainerConfig, error) {
+    opts, _, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
     if err != nil {
         return nil, err
     }
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go
index 18c8b7a147a..a09909e8c04 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go
@@ -604,12 +604,6 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
         podIP = podStatus.IP
     }
 
-    rawHostIP, err := m.runtimeHelper.GetHostIP()
-    hostIP := rawHostIP.String()
-    if err != nil {
-        glog.Errorf("Failed to get Host IP for pod: %s; %v", format.Pod(pod), err)
-    }
-
     // Step 4: Create a sandbox for the pod if necessary.
     podSandboxID := podContainerChanges.SandboxID
     if podContainerChanges.CreateSandbox && len(podContainerChanges.ContainersToStart) > 0 {
@@ -686,7 +680,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
         }
 
         glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod))
-        if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, hostIP); err != nil {
+        if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil {
             startContainerResult.Fail(err, msg)
             utilruntime.HandleError(fmt.Errorf("init container start failed: %v: %s", err, msg))
             return
@@ -720,7 +714,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
         }
 
         glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
-        if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, hostIP); err != nil {
+        if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil {
             startContainerResult.Fail(err, msg)
             utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
             continue
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
index 8bf91ae385b..1b29e2422ea 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
@@ -146,7 +146,7 @@ func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template cont
     sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt)
     assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template)
 
-    containerConfig, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", "", template.container.Image)
+    containerConfig, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image)
     assert.NoError(t, err, "generateContainerConfig for container template %+v", template)
 
     podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)
diff --git a/pkg/kubelet/rkt/rkt.go b/pkg/kubelet/rkt/rkt.go
index fb3ab16054e..7decec4c5bc 100644
--- a/pkg/kubelet/rkt/rkt.go
+++ b/pkg/kubelet/rkt/rkt.go
@@ -607,7 +607,7 @@ func setApp(imgManifest *appcschema.ImageManifest, c *v1.Container,
 }
 
 // makePodManifest transforms a kubelet pod spec to the rkt pod manifest.
-func (r *Runtime) makePodManifest(pod *v1.Pod, podIP, hostIP string, pullSecrets []v1.Secret) (*appcschema.PodManifest, error) {
+func (r *Runtime) makePodManifest(pod *v1.Pod, podIP string, pullSecrets []v1.Secret) (*appcschema.PodManifest, error) {
     manifest := appcschema.BlankPodManifest()
 
     ctx, cancel := context.WithTimeout(context.Background(), r.requestTimeout)
@@ -654,7 +654,7 @@ func (r *Runtime) makePodManifest(pod *v1.Pod, podIP, hostIP string, pullSecrets
     }
 
     for _, c := range pod.Spec.Containers {
-        err := r.newAppcRuntimeApp(pod, podIP, hostIP, c, requiresPrivileged, pullSecrets, manifest)
+        err := r.newAppcRuntimeApp(pod, podIP, c, requiresPrivileged, pullSecrets, manifest)
         if err != nil {
             return nil, err
         }
@@ -776,7 +776,7 @@ func (r *Runtime) makeContainerLogMount(opts *kubecontainer.RunContainerOptions,
     return &mnt, nil
 }
 
-func (r *Runtime) newAppcRuntimeApp(pod *v1.Pod, podIP, hostIP string, c v1.Container, requiresPrivileged bool, pullSecrets []v1.Secret, manifest *appcschema.PodManifest) error {
+func (r *Runtime) newAppcRuntimeApp(pod *v1.Pod, podIP string, c v1.Container, requiresPrivileged bool, pullSecrets []v1.Secret, manifest *appcschema.PodManifest) error {
     var annotations appctypes.Annotations = []appctypes.Annotation{
         {
             Name: *appctypes.MustACIdentifier(k8sRktContainerHashAnno),
@@ -810,7 +810,7 @@ func (r *Runtime) newAppcRuntimeApp(pod *v1.Pod, podIP, hostIP string, c v1.Cont
     }
 
     // TODO: determine how this should be handled for rkt
-    opts, _, err := r.runtimeHelper.GenerateRunContainerOptions(pod, &c, podIP, hostIP)
+    opts, _, err := r.runtimeHelper.GenerateRunContainerOptions(pod, &c, podIP)
     if err != nil {
         return err
     }
@@ -1135,9 +1135,9 @@ func constructSyslogIdentifier(generateName string, podName string) string {
 //
 // On success, it will return a string that represents name of the unit file
 // and the runtime pod.
-func (r *Runtime) preparePod(pod *v1.Pod, podIP, hostIP string, pullSecrets []v1.Secret, netnsName string) (string, *kubecontainer.Pod, error) {
+func (r *Runtime) preparePod(pod *v1.Pod, podIP string, pullSecrets []v1.Secret, netnsName string) (string, *kubecontainer.Pod, error) {
     // Generate the appc pod manifest from the k8s pod spec.
-    manifest, err := r.makePodManifest(pod, podIP, hostIP, pullSecrets)
+    manifest, err := r.makePodManifest(pod, podIP, pullSecrets)
     if err != nil {
         return "", nil, err
     }
@@ -1349,13 +1349,7 @@ func (r *Runtime) RunPod(pod *v1.Pod, pullSecrets []v1.Secret) error {
         return err
     }
 
-    rawHostIP, err := r.runtimeHelper.GetHostIP()
-    hostIP := rawHostIP.String()
-    if err != nil {
-        glog.Errorf("Failed to get Host IP for pod: %s; %v", format.Pod(pod), err)
-    }
-
-    name, runtimePod, prepareErr := r.preparePod(pod, podIP, hostIP, pullSecrets, netnsName)
+    name, runtimePod, prepareErr := r.preparePod(pod, podIP, pullSecrets, netnsName)
 
     // Set container references and generate events.
     // If preparedPod fails, then send out 'failed' events for each container.
diff --git a/pkg/kubelet/rkt/rkt_test.go b/pkg/kubelet/rkt/rkt_test.go
index c5118f9cf0b..d38a48c3e95 100644
--- a/pkg/kubelet/rkt/rkt_test.go
+++ b/pkg/kubelet/rkt/rkt_test.go
@@ -1902,7 +1902,7 @@ func TestMakePodManifestAnnotations(t *testing.T) {
     for i, testCase := range testCases {
         hint := fmt.Sprintf("case #%d", i)
 
-        result, err := r.makePodManifest(testCase.in, "", "", []v1.Secret{})
+        result, err := r.makePodManifest(testCase.in, "", []v1.Secret{})
         assert.Equal(t, testCase.outerr, err, hint)
         if err == nil {
             sort.Sort(annotationsByName(result.Annotations))
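
Illustration (not part of the patch): with the host-IP lookup moved into podFieldSelectorRuntimeValue, a container that requests status.hostIP through the downward API has the value resolved via kl.GetHostIP() only when the kubelet builds that container's environment, instead of SyncPod fetching it up front and threading a hostIP string through every runtime signature. The sketch below shows, in Go against the pkg/api/v1 types used by this patch, what such a pod spec looks like; the container name, image, and the HOST_IP variable name are hypothetical and chosen only for the example.

// hostip_downward_example.go -- illustrative sketch, not part of this change.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	// A pod whose container asks for the node's IP via the downward API.
	// With this patch, the kubelet resolves status.hostIP by calling
	// GetHostIP() inside makeEnvironmentVariables -> podFieldSelectorRuntimeValue.
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "app",     // hypothetical container name
					Image: "busybox", // hypothetical image
					Env: []v1.EnvVar{
						{
							Name: "HOST_IP", // hypothetical env var name
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									FieldPath: "status.hostIP",
								},
							},
						},
					},
				},
			},
		},
	}

	fmt.Printf("container %q requests %s via the downward API\n",
		pod.Spec.Containers[0].Name,
		pod.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath)
}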