Mirror of https://github.com/k3s-io/kubernetes.git
call GetHostIP from makeEnvironment
parent f05d584a4a
commit 4f6c1b5ad5
@@ -3584,7 +3584,7 @@
     "properties": {
      "fieldRef": {
       "$ref": "v1.ObjectFieldSelector",
-      "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.podIP."
+      "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP."
      },
      "resourceFieldRef": {
       "$ref": "v1.ResourceFieldSelector",
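The description change above documents that the downward API field selector now accepts `status.hostIP`. For orientation, a container would consume it roughly like the following sketch built with the same `pkg/api/v1` types this tree uses; the container name, image, and env var name are illustrative and not part of this commit:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	// HOST_IP is filled in by the kubelet when the container starts,
	// using the downward API selector status.hostIP.
	container := v1.Container{
		Name:  "example",
		Image: "busybox",
		Env: []v1.EnvVar{
			{
				Name: "HOST_IP",
				ValueFrom: &v1.EnvVarSource{
					FieldRef: &v1.ObjectFieldSelector{
						FieldPath: "status.hostIP",
					},
				},
			},
		},
	}
	fmt.Printf("%s <- %s\n", container.Env[0].Name, container.Env[0].ValueFrom.FieldRef.FieldPath)
}
```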
@@ -21,7 +21,6 @@ import (
     "fmt"
     "hash/adler32"
     "hash/fnv"
-    "net"
     "strings"
     "time"
 
@@ -49,7 +48,7 @@ type HandlerRunner interface {
 // RuntimeHelper wraps kubelet to make container runtime
 // able to get necessary informations like the RunContainerOptions, DNS settings, Host IP.
 type RuntimeHelper interface {
-    GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP, hostIP string) (contOpts *RunContainerOptions, useClusterFirstPolicy bool, err error)
+    GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, useClusterFirstPolicy bool, err error)
     GetClusterDNS(pod *v1.Pod) (dnsServers []string, dnsSearches []string, useClusterFirstPolicy bool, err error)
     // GetPodCgroupParent returns the the CgroupName identifer, and its literal cgroupfs form on the host
     // of a pod.
@@ -60,8 +59,6 @@ type RuntimeHelper interface {
     // supplemental groups for the Pod. These extra supplemental groups come
     // from annotations on persistent volumes that the pod depends on.
     GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64
-
-    GetHostIP() (net.IP, error)
 }
 
 // ShouldContainerBeRestarted checks whether a container needs to be restarted.
@@ -17,8 +17,6 @@ limitations under the License.
 package testing
 
 import (
-    "net"
-
     kubetypes "k8s.io/apimachinery/pkg/types"
     "k8s.io/kubernetes/pkg/api/v1"
     kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -34,7 +32,7 @@ type FakeRuntimeHelper struct {
     Err error
 }
 
-func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP, hostIP string) (*kubecontainer.RunContainerOptions, bool, error) {
+func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, bool, error) {
     var opts kubecontainer.RunContainerOptions
     if len(container.TerminationMessagePath) != 0 {
         opts.PodContainerDir = f.PodContainerDir
@@ -62,7 +60,3 @@ func (f *FakeRuntimeHelper) GetPodDir(podUID kubetypes.UID) string {
 func (f *FakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
     return nil
 }
-
-func (f *FakeRuntimeHelper) GetHostIP() (net.IP, error) {
-    return []byte{}, nil
-}
@@ -1743,7 +1743,7 @@ func (dm *DockerManager) applyOOMScoreAdj(pod *v1.Pod, container *v1.Container,
 
 // Run a single container from a pod. Returns the docker container ID
 // If do not need to pass labels, just pass nil.
-func (dm *DockerManager) runContainerInPod(pod *v1.Pod, container *v1.Container, netMode, ipcMode, pidMode, podIP, hostIP, imageRef string, restartCount int) (kubecontainer.ContainerID, error) {
+func (dm *DockerManager) runContainerInPod(pod *v1.Pod, container *v1.Container, netMode, ipcMode, pidMode, podIP, imageRef string, restartCount int) (kubecontainer.ContainerID, error) {
     start := time.Now()
     defer func() {
         metrics.ContainerManagerLatency.WithLabelValues("runContainerInPod").Observe(metrics.SinceInMicroseconds(start))
@@ -1756,7 +1756,7 @@ func (dm *DockerManager) runContainerInPod(pod *v1.Pod, container *v1.Container,
         glog.V(5).Infof("Generating ref for container %s: %#v", container.Name, ref)
     }
 
-    opts, useClusterFirstPolicy, err := dm.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, hostIP)
+    opts, useClusterFirstPolicy, err := dm.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
     if err != nil {
         return kubecontainer.ContainerID{}, fmt.Errorf("GenerateRunContainerOptions: %v", err)
     }
@@ -1993,8 +1993,7 @@ func (dm *DockerManager) createPodInfraContainer(pod *v1.Pod) (kubecontainer.Doc
     }
 
     // Currently we don't care about restart count of infra container, just set it to 0.
-    // We also don't care about podIP and hostIP since their only passed in during runtime because of downward API
-    id, err := dm.runContainerInPod(pod, container, netNamespace, getIPCMode(pod), getPidMode(pod), "", "", imageRef, 0)
+    id, err := dm.runContainerInPod(pod, container, netNamespace, getIPCMode(pod), getPidMode(pod), "", imageRef, 0)
     if err != nil {
         return "", kubecontainer.ErrRunContainer, err.Error()
     }
@@ -2270,12 +2269,6 @@ func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecon
         podIP = podStatus.IP
     }
 
-    rawHostIP, err := dm.runtimeHelper.GetHostIP()
-    hostIP := rawHostIP.String()
-    if err != nil {
-        glog.Errorf("Failed to get Host IP for pod: %s; %v", format.Pod(pod), err)
-    }
-
     // If we should create infra container then we do it first.
     podInfraContainerID := containerChanges.InfraContainerId
     if containerChanges.StartInfraContainer && (len(containerChanges.ContainersToStart) > 0) {
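A side note on the removed block: it converted `rawHostIP` to a string before checking `err`, and on failure `rawHostIP` was presumably a nil `net.IP`. That ordering is tolerated only because a zero-length `net.IP` stringifies to `"<nil>"` instead of panicking, as this standalone snippet shows:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	var ip net.IP // nil, e.g. what a failed lookup might leave behind

	// String on a zero-length net.IP returns "<nil>" rather than panicking,
	// which is why calling it before the error check did not crash.
	fmt.Println(ip.String()) // prints: <nil>
}
```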
@@ -2376,7 +2369,7 @@ func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecon
         }
 
         glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod))
-        if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP, hostIP); err != nil {
+        if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil {
            startContainerResult.Fail(err, msg)
            utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
            return
@@ -2414,7 +2407,7 @@ func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecon
         }
 
         glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
-        if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP, hostIP); err != nil {
+        if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil {
            startContainerResult.Fail(err, msg)
            utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
            continue
@@ -2425,7 +2418,7 @@ func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecon
 
 // tryContainerStart attempts to pull and start the container, returning an error and a reason string if the start
 // was not successful.
-func (dm *DockerManager) tryContainerStart(container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, namespaceMode, pidMode, podIP, hostIP string) (err error, reason string) {
+func (dm *DockerManager) tryContainerStart(container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, namespaceMode, pidMode, podIP string) (err error, reason string) {
     imageRef, msg, err := dm.imagePuller.EnsureImageExists(pod, container, pullSecrets)
     if err != nil {
         return err, msg
@@ -2452,7 +2445,7 @@ func (dm *DockerManager) tryContainerStart(container *v1.Container, pod *v1.Pod,
         netMode = namespaceMode
     }
 
-    _, err = dm.runContainerInPod(pod, container, netMode, namespaceMode, pidMode, podIP, hostIP, imageRef, restartCount)
+    _, err = dm.runContainerInPod(pod, container, netMode, namespaceMode, pidMode, podIP, imageRef, restartCount)
     if err != nil {
         // TODO(bburns) : Perhaps blacklist a container after N failures?
         return kubecontainer.ErrRunContainer, err.Error()
@@ -275,7 +275,7 @@ func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string {
 
 // GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
 // the container runtime to set parameters for launching a container.
-func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP, hostIP string) (*kubecontainer.RunContainerOptions, bool, error) {
+func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, bool, error) {
     var err error
     useClusterFirstPolicy := false
     cgroupParent := kl.GetPodCgroupParent(pod)
@@ -299,7 +299,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
     if err != nil {
         return nil, false, err
     }
-    opts.Envs, err = kl.makeEnvironmentVariables(pod, container, podIP, hostIP)
+    opts.Envs, err = kl.makeEnvironmentVariables(pod, container, podIP)
     if err != nil {
         return nil, false, err
     }
@@ -386,7 +386,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
 }
 
 // Make the environment variables for a pod in the given namespace.
-func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP, hostIP string) ([]kubecontainer.EnvVar, error) {
+func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string) ([]kubecontainer.EnvVar, error) {
     var result []kubecontainer.EnvVar
     // Note: These are added to the docker Config, but are not included in the checksum computed
     // by dockertools.BuildDockerName(...). That way, we can still determine whether an
@@ -506,7 +506,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
             // Step 1b: resolve alternate env var sources
             switch {
             case envVar.ValueFrom.FieldRef != nil:
-                runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP, hostIP)
+                runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP)
                 if err != nil {
                     return result, err
                 }
@@ -607,7 +607,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
 
 // podFieldSelectorRuntimeValue returns the runtime value of the given
 // selector for a pod.
-func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP, hostIP string) (string, error) {
+func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string) (string, error) {
     internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "")
     if err != nil {
         return "", err
@@ -618,7 +618,12 @@ func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod
     case "spec.serviceAccountName":
         return pod.Spec.ServiceAccountName, nil
     case "status.hostIP":
-        return hostIP, nil
+        hostIP, err := kl.GetHostIP()
+        if err != nil {
+            return "", err
+        }
+
+        return hostIP.String(), nil
     case "status.podIP":
         return podIP, nil
     }
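This hunk is the core of the change: `status.hostIP` is now resolved on demand inside the kubelet instead of being threaded as a `hostIP` argument through every runtime call path. The following compilable toy (not kubelet code; `hostIPGetter`, `fakeKubelet`, and `fieldValue` are illustrative names) mirrors that shape, with the host IP looked up only when the field is actually requested:

```go
package main

import (
	"fmt"
	"net"
)

// hostIPGetter stands in for the kubelet's GetHostIP method.
type hostIPGetter interface {
	GetHostIP() (net.IP, error)
}

type fakeKubelet struct{ ip net.IP }

func (f fakeKubelet) GetHostIP() (net.IP, error) { return f.ip, nil }

// fieldValue mirrors the new selector handling: podIP is still passed in,
// but the host IP is fetched lazily from the kubelet-like object.
func fieldValue(kl hostIPGetter, fieldPath, podIP string) (string, error) {
	switch fieldPath {
	case "status.hostIP":
		hostIP, err := kl.GetHostIP()
		if err != nil {
			return "", err
		}
		return hostIP.String(), nil
	case "status.podIP":
		return podIP, nil
	}
	return "", fmt.Errorf("unsupported field path %q", fieldPath)
}

func main() {
	kl := fakeKubelet{ip: net.ParseIP("127.0.0.1")}
	v, err := fieldValue(kl, "status.hostIP", "1.2.3.4")
	fmt.Println(v, err) // 127.0.0.1 <nil>
}
```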
@@ -187,7 +187,7 @@ func TestGenerateRunContainerOptions_DNSConfigurationParams(t *testing.T) {
     options := make([]*kubecontainer.RunContainerOptions, 4)
     for i, pod := range pods {
         var err error
-        options[i], _, err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "", "")
+        options[i], _, err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "")
         if err != nil {
             t.Fatalf("failed to generate container options: %v", err)
         }
@@ -220,7 +220,7 @@ func TestGenerateRunContainerOptions_DNSConfigurationParams(t *testing.T) {
     kubelet.resolverConfig = "/etc/resolv.conf"
     for i, pod := range pods {
         var err error
-        options[i], _, err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "", "")
+        options[i], _, err = kubelet.GenerateRunContainerOptions(pod, &v1.Container{}, "")
         if err != nil {
             t.Fatalf("failed to generate container options: %v", err)
         }
@@ -521,7 +521,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
                 {Name: "POD_NODE_NAME", Value: "node-name"},
                 {Name: "POD_SERVICE_ACCOUNT_NAME", Value: "special"},
                 {Name: "POD_IP", Value: "1.2.3.4"},
-                {Name: "HOST_IP", Value: "5.6.7.8"},
+                {Name: "HOST_IP", Value: testKubeletHostIP},
             },
         },
         {
@@ -1154,9 +1154,8 @@ func TestMakeEnvironmentVariables(t *testing.T) {
             },
         }
         podIP := "1.2.3.4"
-        hostIP := "5.6.7.8"
 
-        result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP, hostIP)
+        result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP)
         select {
         case e := <-fakeRecorder.Events:
             assert.Equal(t, tc.expectedEvent, e)
@@ -79,6 +79,7 @@ func init() {
 
 const (
     testKubeletHostname = "127.0.0.1"
+    testKubeletHostIP   = "127.0.0.1"
 
     testReservationCPU    = "200m"
     testReservationMemory = "100M"
@@ -166,7 +167,31 @@ func newTestKubeletWithImageList(
     kubelet.masterServiceNamespace = metav1.NamespaceDefault
     kubelet.serviceLister = testServiceLister{}
     kubelet.nodeLister = testNodeLister{}
-    kubelet.nodeInfo = testNodeInfo{}
+    kubelet.nodeInfo = testNodeInfo{
+        nodes: []*v1.Node{
+            {
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: string(kubelet.nodeName),
+                },
+                Status: v1.NodeStatus{
+                    Conditions: []v1.NodeCondition{
+                        {
+                            Type:    v1.NodeReady,
+                            Status:  v1.ConditionTrue,
+                            Reason:  "Ready",
+                            Message: "Node ready",
+                        },
+                    },
+                    Addresses: []v1.NodeAddress{
+                        {
+                            Type:    v1.NodeInternalIP,
+                            Address: testKubeletHostIP,
+                        },
+                    },
+                },
+            },
+        },
+    }
     kubelet.recorder = fakeRecorder
     if err := kubelet.setupDataDirs(); err != nil {
         t.Fatalf("can't initialize kubelet data dirs: %v", err)
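The test kubelet now carries a Node object whose `NodeInternalIP` address is `testKubeletHostIP`, which is what the updated `HOST_IP` expectation in `TestMakeEnvironmentVariables` relies on: it suggests `GetHostIP` derives the value from the kubelet's Node record rather than from an argument. A rough standalone sketch of that kind of lookup follows; the `nodeHostIP` helper and its preference for the internal address are assumptions for illustration, not code from this commit:

```go
package main

import (
	"fmt"
	"net"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

// nodeHostIP is a simplified stand-in for resolving a host IP from a node's
// status addresses; the real kubelet logic may differ.
func nodeHostIP(node *v1.Node) (net.IP, error) {
	for _, addr := range node.Status.Addresses {
		if addr.Type == v1.NodeInternalIP {
			if ip := net.ParseIP(addr.Address); ip != nil {
				return ip, nil
			}
		}
	}
	return nil, fmt.Errorf("node %s has no usable InternalIP address", node.Name)
}

func main() {
	// Mirrors the fixture above: one node with a single InternalIP address.
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "127.0.0.1"},
		Status: v1.NodeStatus{
			Addresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
			},
		},
	}
	ip, err := nodeHostIP(node)
	fmt.Println(ip, err) // 127.0.0.1 <nil>
}
```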
@@ -51,7 +51,7 @@ import (
 // * create the container
 // * start the container
 // * run the post start lifecycle hooks (if applicable)
-func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP, hostIP string) (string, error) {
+func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string) (string, error) {
     // Step 1: pull the image.
     imageRef, msg, err := m.imagePuller.EnsureImageExists(pod, container, pullSecrets)
     if err != nil {
@@ -72,7 +72,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
         restartCount = containerStatus.RestartCount + 1
     }
 
-    containerConfig, err := m.generateContainerConfig(container, pod, restartCount, podIP, hostIP, imageRef)
+    containerConfig, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef)
     if err != nil {
         m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToCreateContainer, "Failed to create container with error: %v", err)
         return "Generate Container Config Failed", err
@@ -131,8 +131,8 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
 }
 
 // generateContainerConfig generates container config for kubelet runtime v1.
-func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, hostIP, imageRef string) (*runtimeapi.ContainerConfig, error) {
-    opts, _, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, hostIP)
+func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string) (*runtimeapi.ContainerConfig, error) {
+    opts, _, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
     if err != nil {
         return nil, err
     }
@@ -604,12 +604,6 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
         podIP = podStatus.IP
     }
 
-    rawHostIP, err := m.runtimeHelper.GetHostIP()
-    hostIP := rawHostIP.String()
-    if err != nil {
-        glog.Errorf("Failed to get Host IP for pod: %s; %v", format.Pod(pod), err)
-    }
-
     // Step 4: Create a sandbox for the pod if necessary.
     podSandboxID := podContainerChanges.SandboxID
     if podContainerChanges.CreateSandbox && len(podContainerChanges.ContainersToStart) > 0 {
@@ -686,7 +680,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
         }
 
         glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod))
-        if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, hostIP); err != nil {
+        if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil {
            startContainerResult.Fail(err, msg)
            utilruntime.HandleError(fmt.Errorf("init container start failed: %v: %s", err, msg))
            return
@@ -720,7 +714,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
         }
 
         glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
-        if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, hostIP); err != nil {
+        if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil {
            startContainerResult.Fail(err, msg)
            utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
            continue
@@ -146,7 +146,7 @@ func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template cont
     sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt)
     assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template)
 
-    containerConfig, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", "", template.container.Image)
+    containerConfig, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image)
     assert.NoError(t, err, "generateContainerConfig for container template %+v", template)
 
     podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)
@@ -607,7 +607,7 @@ func setApp(imgManifest *appcschema.ImageManifest, c *v1.Container,
 }
 
 // makePodManifest transforms a kubelet pod spec to the rkt pod manifest.
-func (r *Runtime) makePodManifest(pod *v1.Pod, podIP, hostIP string, pullSecrets []v1.Secret) (*appcschema.PodManifest, error) {
+func (r *Runtime) makePodManifest(pod *v1.Pod, podIP string, pullSecrets []v1.Secret) (*appcschema.PodManifest, error) {
     manifest := appcschema.BlankPodManifest()
 
     ctx, cancel := context.WithTimeout(context.Background(), r.requestTimeout)
@@ -654,7 +654,7 @@ func (r *Runtime) makePodManifest(pod *v1.Pod, podIP, hostIP string, pullSecrets
     }
 
     for _, c := range pod.Spec.Containers {
-        err := r.newAppcRuntimeApp(pod, podIP, hostIP, c, requiresPrivileged, pullSecrets, manifest)
+        err := r.newAppcRuntimeApp(pod, podIP, c, requiresPrivileged, pullSecrets, manifest)
         if err != nil {
             return nil, err
         }
@@ -776,7 +776,7 @@ func (r *Runtime) makeContainerLogMount(opts *kubecontainer.RunContainerOptions,
     return &mnt, nil
 }
 
-func (r *Runtime) newAppcRuntimeApp(pod *v1.Pod, podIP, hostIP string, c v1.Container, requiresPrivileged bool, pullSecrets []v1.Secret, manifest *appcschema.PodManifest) error {
+func (r *Runtime) newAppcRuntimeApp(pod *v1.Pod, podIP string, c v1.Container, requiresPrivileged bool, pullSecrets []v1.Secret, manifest *appcschema.PodManifest) error {
     var annotations appctypes.Annotations = []appctypes.Annotation{
         {
             Name: *appctypes.MustACIdentifier(k8sRktContainerHashAnno),
@@ -810,7 +810,7 @@ func (r *Runtime) newAppcRuntimeApp(pod *v1.Pod, podIP, hostIP string, c v1.Cont
     }
 
     // TODO: determine how this should be handled for rkt
-    opts, _, err := r.runtimeHelper.GenerateRunContainerOptions(pod, &c, podIP, hostIP)
+    opts, _, err := r.runtimeHelper.GenerateRunContainerOptions(pod, &c, podIP)
     if err != nil {
         return err
     }
@@ -1135,9 +1135,9 @@ func constructSyslogIdentifier(generateName string, podName string) string {
 //
 // On success, it will return a string that represents name of the unit file
 // and the runtime pod.
-func (r *Runtime) preparePod(pod *v1.Pod, podIP, hostIP string, pullSecrets []v1.Secret, netnsName string) (string, *kubecontainer.Pod, error) {
+func (r *Runtime) preparePod(pod *v1.Pod, podIP string, pullSecrets []v1.Secret, netnsName string) (string, *kubecontainer.Pod, error) {
     // Generate the appc pod manifest from the k8s pod spec.
-    manifest, err := r.makePodManifest(pod, podIP, hostIP, pullSecrets)
+    manifest, err := r.makePodManifest(pod, podIP, pullSecrets)
     if err != nil {
         return "", nil, err
     }
@@ -1349,13 +1349,7 @@ func (r *Runtime) RunPod(pod *v1.Pod, pullSecrets []v1.Secret) error {
         return err
     }
 
-    rawHostIP, err := r.runtimeHelper.GetHostIP()
-    hostIP := rawHostIP.String()
-    if err != nil {
-        glog.Errorf("Failed to get Host IP for pod: %s; %v", format.Pod(pod), err)
-    }
-
-    name, runtimePod, prepareErr := r.preparePod(pod, podIP, hostIP, pullSecrets, netnsName)
+    name, runtimePod, prepareErr := r.preparePod(pod, podIP, pullSecrets, netnsName)
 
     // Set container references and generate events.
     // If preparedPod fails, then send out 'failed' events for each container.
@@ -1902,7 +1902,7 @@ func TestMakePodManifestAnnotations(t *testing.T) {
     for i, testCase := range testCases {
         hint := fmt.Sprintf("case #%d", i)
 
-        result, err := r.makePodManifest(testCase.in, "", "", []v1.Secret{})
+        result, err := r.makePodManifest(testCase.in, "", []v1.Secret{})
         assert.Equal(t, testCase.outerr, err, hint)
         if err == nil {
             sort.Sort(annotationsByName(result.Annotations))