Merge pull request #83123 from aramase/dualstack-downward-api

Dual-stack downward API
Kubernetes Prow Robot 2019-11-14 22:13:42 -08:00 committed by GitHub
commit 4e45328e65
21 changed files with 127 additions and 60 deletions

View File

@ -7668,7 +7668,7 @@
},
"fieldRef": {
"$ref": "#/definitions/io.k8s.api.core.v1.ObjectFieldSelector",
"description": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP."
"description": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs."
},
"resourceFieldRef": {
"$ref": "#/definitions/io.k8s.api.core.v1.ResourceFieldSelector",

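For context, the description change above means a container can now ask for the pod's full IP list through the downward API. A minimal, hypothetical sketch using the core/v1 Go types (the env var name POD_IPS is an illustrative choice, not something this PR defines):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Illustrative only: wire an env var to the newly supported status.podIPs
	// field path via the downward API.
	env := v1.EnvVar{
		Name: "POD_IPS", // assumed name, not part of this change
		ValueFrom: &v1.EnvVarSource{
			FieldRef: &v1.ObjectFieldSelector{
				APIVersion: "v1",
				FieldPath:  "status.podIPs",
			},
		},
	}
	fmt.Printf("%+v\n", env)
}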
View File

@ -88,7 +88,8 @@ func ConvertDownwardAPIFieldLabel(version, label, value string) (string, string,
"spec.schedulerName",
"status.phase",
"status.hostIP",
"status.podIP":
"status.podIP",
"status.podIPs":
return label, value, nil
// This is for backwards compatibility with old v1 clients which send spec.host
case "spec.host":

View File

@ -168,6 +168,20 @@ func TestConvertDownwardAPIFieldLabel(t *testing.T) {
expectedLabel: "spec.nodeName",
expectedValue: "127.0.0.1",
},
{
version: "v1",
label: "status.podIPs",
value: "10.244.0.6,fd00::6",
expectedLabel: "status.podIPs",
expectedValue: "10.244.0.6,fd00::6",
},
{
version: "v1",
label: "status.podIPs",
value: "10.244.0.6",
expectedLabel: "status.podIPs",
expectedValue: "10.244.0.6",
},
}
for _, tc := range testCases {
label, value, err := ConvertDownwardAPIFieldLabel(tc.version, tc.label, tc.value)

View File

@ -1775,7 +1775,7 @@ type EnvVar struct {
// Only one of its fields may be set.
type EnvVarSource struct {
// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
// metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
// metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
// +optional
FieldRef *ObjectFieldSelector
// Selects a resource of the container: only resources limits and requests

View File

@ -43,6 +43,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error {
"spec.serviceAccountName",
"status.phase",
"status.podIP",
"status.podIPs",
"status.nominatedNodeName":
return label, value, nil
// This is for backwards compatibility with old v1 clients which send spec.host

View File

@ -2097,7 +2097,10 @@ var validEnvDownwardAPIFieldPathExpressions = sets.NewString(
"spec.nodeName",
"spec.serviceAccountName",
"status.hostIP",
"status.podIP")
"status.podIP",
// status.podIPs is populated even if the IPv6DualStack feature gate
// is not enabled, so this works for both single-stack and dual-stack clusters.
"status.podIPs")
var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "limits.ephemeral-storage", "requests.cpu", "requests.memory", "requests.ephemeral-storage")
func validateEnvVarValueFrom(ev core.EnvVar, fldPath *field.Path) field.ErrorList {
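As a quick illustration of what the validation change buys (a sketch only, using a trimmed copy of the allow-list above rather than importing the real validation package):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Trimmed copy of validEnvDownwardAPIFieldPathExpressions, for illustration.
	valid := sets.NewString("status.hostIP", "status.podIP", "status.podIPs")
	fmt.Println(valid.Has("status.podIPs")) // true: the new path passes validation
	fmt.Println(valid.Has("status.phase"))  // false: still rejected for env vars
}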

View File

@ -4431,6 +4431,15 @@ func TestValidateEnv(t *testing.T) {
},
},
},
{
Name: "abc",
ValueFrom: &core.EnvVarSource{
FieldRef: &core.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIPs",
},
},
},
{
Name: "secret_value",
ValueFrom: &core.EnvVarSource{
@ -4662,7 +4671,7 @@ func TestValidateEnv(t *testing.T) {
},
},
}},
expectedError: `[0].valueFrom.fieldRef.fieldPath: Unsupported value: "metadata.labels": supported values: "metadata.name", "metadata.namespace", "metadata.uid", "spec.nodeName", "spec.serviceAccountName", "status.hostIP", "status.podIP"`,
expectedError: `[0].valueFrom.fieldRef.fieldPath: Unsupported value: "metadata.labels": supported values: "metadata.name", "metadata.namespace", "metadata.uid", "spec.nodeName", "spec.serviceAccountName", "status.hostIP", "status.podIP", "status.podIPs"`,
},
{
name: "metadata.annotations without subscript",
@ -4675,7 +4684,7 @@ func TestValidateEnv(t *testing.T) {
},
},
}},
expectedError: `[0].valueFrom.fieldRef.fieldPath: Unsupported value: "metadata.annotations": supported values: "metadata.name", "metadata.namespace", "metadata.uid", "spec.nodeName", "spec.serviceAccountName", "status.hostIP", "status.podIP"`,
expectedError: `[0].valueFrom.fieldRef.fieldPath: Unsupported value: "metadata.annotations": supported values: "metadata.name", "metadata.namespace", "metadata.uid", "spec.nodeName", "spec.serviceAccountName", "status.hostIP", "status.podIP", "status.podIPs"`,
},
{
name: "metadata.annotations with invalid key",
@ -4714,7 +4723,7 @@ func TestValidateEnv(t *testing.T) {
},
},
}},
expectedError: `valueFrom.fieldRef.fieldPath: Unsupported value: "status.phase": supported values: "metadata.name", "metadata.namespace", "metadata.uid", "spec.nodeName", "spec.serviceAccountName", "status.hostIP", "status.podIP"`,
expectedError: `valueFrom.fieldRef.fieldPath: Unsupported value: "status.phase": supported values: "metadata.name", "metadata.namespace", "metadata.uid", "spec.nodeName", "spec.serviceAccountName", "status.hostIP", "status.podIP", "status.podIPs"`,
},
}
for _, tc := range errorCases {

View File

@ -24,7 +24,7 @@ import (
"k8s.io/klog"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@ -46,7 +46,7 @@ type HandlerRunner interface {
// RuntimeHelper wraps kubelet to make container runtime
// able to get necessary information like the RunContainerOptions, DNS settings, Host IP.
type RuntimeHelper interface {
GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, cleanupAction func(), err error)
GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (contOpts *RunContainerOptions, cleanupAction func(), err error)
GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error)
// GetPodCgroupParent returns the CgroupName identifier, and its literal cgroupfs form on the host
// of a pod.

View File

@ -34,7 +34,7 @@ type FakeRuntimeHelper struct {
Err error
}
func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, func(), error) {
func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) {
var opts kubecontainer.RunContainerOptions
if len(container.TerminationMessagePath) != 0 {
opts.PodContainerDir = f.PodContainerDir

View File

@ -128,15 +128,15 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol
}
// makeMounts determines the mount points for the given container.
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap, hu hostutil.HostUtils, subpather subpath.Interface, expandEnvs []kubecontainer.EnvVar) ([]kubecontainer.Mount, func(), error) {
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain string, podIPs []string, podVolumes kubecontainer.VolumeMap, hu hostutil.HostUtils, subpather subpath.Interface, expandEnvs []kubecontainer.EnvVar) ([]kubecontainer.Mount, func(), error) {
// Kubernetes only mounts on /etc/hosts if:
// - container is not an infrastructure (pause) container
// - container is not already mounting on /etc/hosts
// - OS is not Windows
// Kubernetes will not mount /etc/hosts if:
// - the Pod sandbox is being created and its IP is still unknown, so PodIP will not have been set yet.
mountEtcHostsFile := len(podIP) > 0 && runtime.GOOS != "windows"
klog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile)
mountEtcHostsFile := len(podIPs) > 0 && runtime.GOOS != "windows"
klog.V(3).Infof("container: %v/%v/%v podIPs: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIPs, mountEtcHostsFile)
mounts := []kubecontainer.Mount{}
var cleanupAction func()
for i, mount := range container.VolumeMounts {
@ -258,7 +258,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
}
if mountEtcHostsFile {
hostAliases := pod.Spec.HostAliases
hostsMount, err := makeHostsMount(podDir, podIP, hostName, hostDomain, hostAliases, pod.Spec.HostNetwork)
hostsMount, err := makeHostsMount(podDir, podIPs, hostName, hostDomain, hostAliases, pod.Spec.HostNetwork)
if err != nil {
return nil, cleanupAction, err
}
@ -292,10 +292,11 @@ func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.M
}
// makeHostsMount makes the mountpoint for the hosts file that the containers
// in a pod are injected with.
func makeHostsMount(podDir, podIP, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) (*kubecontainer.Mount, error) {
// in a pod are injected with. podIPs is provided instead of podIP because
// podIPs is populated even if the dual-stack feature flag is not enabled.
func makeHostsMount(podDir string, podIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) (*kubecontainer.Mount, error) {
hostsFilePath := path.Join(podDir, "etc-hosts")
if err := ensureHostsFile(hostsFilePath, podIP, hostName, hostDomainName, hostAliases, useHostNetwork); err != nil {
if err := ensureHostsFile(hostsFilePath, podIPs, hostName, hostDomainName, hostAliases, useHostNetwork); err != nil {
return nil, err
}
return &kubecontainer.Mount{
@ -309,7 +310,7 @@ func makeHostsMount(podDir, podIP, hostName, hostDomainName string, hostAliases
// ensureHostsFile ensures that the given host file has an up-to-date ip, host
// name, and domain name.
func ensureHostsFile(fileName, hostIP, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) error {
func ensureHostsFile(fileName string, hostIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) error {
var hostsFileContent []byte
var err error
@ -323,7 +324,7 @@ func ensureHostsFile(fileName, hostIP, hostName, hostDomainName string, hostAlia
}
} else {
// if Pod is not using host network, create a managed hosts file with Pod IP and other information.
hostsFileContent = managedHostsFileContent(hostIP, hostName, hostDomainName, hostAliases)
hostsFileContent = managedHostsFileContent(hostIPs, hostName, hostDomainName, hostAliases)
}
return ioutil.WriteFile(fileName, hostsFileContent, 0644)
@ -342,9 +343,9 @@ func nodeHostsFileContent(hostsFilePath string, hostAliases []v1.HostAlias) ([]b
return buffer.Bytes(), nil
}
// managedHostsFileContent generates the content of the managed etc hosts based on Pod IP and other
// managedHostsFileContent generates the content of the managed etc hosts based on Pod IPs and other
// information.
func managedHostsFileContent(hostIP, hostName, hostDomainName string, hostAliases []v1.HostAlias) []byte {
func managedHostsFileContent(hostIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias) []byte {
var buffer bytes.Buffer
buffer.WriteString(managedHostsHeader)
buffer.WriteString("127.0.0.1\tlocalhost\n") // ipv4 localhost
@ -354,9 +355,16 @@ func managedHostsFileContent(hostIP, hostName, hostDomainName string, hostAliase
buffer.WriteString("fe00::1\tip6-allnodes\n")
buffer.WriteString("fe00::2\tip6-allrouters\n")
if len(hostDomainName) > 0 {
buffer.WriteString(fmt.Sprintf("%s\t%s.%s\t%s\n", hostIP, hostName, hostDomainName, hostName))
// A host entry is generated for every IP in podIPs; the podIPs
// field is populated even if the dual-stack feature flag is not
// enabled.
for _, hostIP := range hostIPs {
buffer.WriteString(fmt.Sprintf("%s\t%s.%s\t%s\n", hostIP, hostName, hostDomainName, hostName))
}
} else {
buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName))
for _, hostIP := range hostIPs {
buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName))
}
}
buffer.Write(hostsEntriesFromHostAliases(hostAliases))
return buffer.Bytes()
@ -433,7 +441,7 @@ func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string {
// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
// the container runtime to set parameters for launching a container.
func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, func(), error) {
func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) {
opts, err := kl.containerManager.GetResources(pod, container)
if err != nil {
return nil, nil, err
@ -459,13 +467,14 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
opts.Devices = append(opts.Devices, blkVolumes...)
}
envs, err := kl.makeEnvironmentVariables(pod, container, podIP)
envs, err := kl.makeEnvironmentVariables(pod, container, podIP, podIPs)
if err != nil {
return nil, nil, err
}
opts.Envs = append(opts.Envs, envs...)
mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes, kl.hostutil, kl.subpather, opts.Envs)
// only podIPs is sent to makeMounts, as podIPs is populated even if the dual-stack feature flag is not enabled.
mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIPs, volumes, kl.hostutil, kl.subpather, opts.Envs)
if err != nil {
return nil, cleanupAction, err
}
@ -546,7 +555,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string, enableServiceLinks bool) (map[
}
// Make the environment variables for a pod in the given namespace.
func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string) ([]kubecontainer.EnvVar, error) {
func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) ([]kubecontainer.EnvVar, error) {
if pod.Spec.EnableServiceLinks == nil {
return nil, fmt.Errorf("nil pod.spec.enableServiceLinks encountered, cannot construct envvars")
}
@ -670,7 +679,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
// Step 1b: resolve alternate env var sources
switch {
case envVar.ValueFrom.FieldRef != nil:
runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP)
runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP, podIPs)
if err != nil {
return result, err
}
@ -771,7 +780,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
// podFieldSelectorRuntimeValue returns the runtime value of the given
// selector for a pod.
func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string) (string, error) {
func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string, podIPs []string) (string, error) {
internalFieldPath, _, err := podshelper.ConvertDownwardAPIFieldLabel(fs.APIVersion, fs.FieldPath, "")
if err != nil {
return "", err
@ -789,6 +798,8 @@ func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod
return hostIP.String(), nil
case "status.podIP":
return podIP, nil
case "status.podIPs":
return strings.Join(podIPs, ","), nil
}
return fieldpath.ExtractFieldPathAsString(pod, internalFieldPath)
}
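A minimal sketch of the value the new status.podIPs case produces, assuming a made-up dual-stack pod with one IPv4 and one IPv6 address; a single-stack pod simply yields one address with no comma:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical pod IPs; the kubelet joins them exactly as in the case above.
	podIPs := []string{"10.244.0.6", "fd00::6"}
	fmt.Println(strings.Join(podIPs, ",")) // 10.244.0.6,fd00::6
}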

View File

@ -22,7 +22,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
_ "k8s.io/kubernetes/pkg/apis/core/install"
@ -249,7 +249,7 @@ func TestMakeMounts(t *testing.T) {
},
}
mounts, _, err := makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", "", tc.podVolumes, fhu, fsp, nil)
mounts, _, err := makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", []string{""}, tc.podVolumes, fhu, fsp, nil)
// validate only the error if we expect an error
if tc.expectErr {

View File

@ -97,7 +97,7 @@ func TestDisabledSubpath(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSubpath, false)()
for name, test := range cases {
_, _, err := makeMounts(&pod, "/pod", &test.container, "fakepodname", "", "", podVolumes, fhu, fsp, nil)
_, _, err := makeMounts(&pod, "/pod", &test.container, "fakepodname", "", []string{}, podVolumes, fhu, fsp, nil)
if err != nil && !test.expectError {
t.Errorf("test %v failed: %v", name, err)
}
@ -243,14 +243,14 @@ func writeHostsFile(filename string, cfg string) (string, error) {
func TestManagedHostsFileContent(t *testing.T) {
testCases := []struct {
hostIP string
hostIPs []string
hostName string
hostDomainName string
hostAliases []v1.HostAlias
expectedContent string
}{
{
"123.45.67.89",
[]string{"123.45.67.89"},
"podFoo",
"",
[]v1.HostAlias{},
@ -265,7 +265,7 @@ fe00::2 ip6-allrouters
`,
},
{
"203.0.113.1",
[]string{"203.0.113.1"},
"podFoo",
"domainFoo",
[]v1.HostAlias{},
@ -280,7 +280,7 @@ fe00::2 ip6-allrouters
`,
},
{
"203.0.113.1",
[]string{"203.0.113.1"},
"podFoo",
"domainFoo",
[]v1.HostAlias{
@ -300,7 +300,7 @@ fe00::2 ip6-allrouters
`,
},
{
"203.0.113.1",
[]string{"203.0.113.1"},
"podFoo",
"domainFoo",
[]v1.HostAlias{
@ -319,12 +319,28 @@ fe00::2 ip6-allrouters
# Entries added by HostAliases.
123.45.67.89 foo bar baz
456.78.90.123 park doo boo
`,
},
{
[]string{"203.0.113.1", "fd00::6"},
"podFoo",
"domainFoo",
[]v1.HostAlias{},
`# Kubernetes-managed hosts file.
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
fe00::0 ip6-mcastprefix
fe00::1 ip6-allnodes
fe00::2 ip6-allrouters
203.0.113.1 podFoo.domainFoo podFoo
fd00::6 podFoo.domainFoo podFoo
`,
},
}
for _, testCase := range testCases {
actualContent := managedHostsFileContent(testCase.hostIP, testCase.hostName, testCase.hostDomainName, testCase.hostAliases)
actualContent := managedHostsFileContent(testCase.hostIPs, testCase.hostName, testCase.hostDomainName, testCase.hostAliases)
assert.Equal(t, testCase.expectedContent, string(actualContent), "hosts file content not expected")
}
}
@ -703,6 +719,15 @@ func TestMakeEnvironmentVariables(t *testing.T) {
},
},
},
{
Name: "POD_IPS",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIPs",
},
},
},
{
Name: "HOST_IP",
ValueFrom: &v1.EnvVarSource{
@ -722,6 +747,7 @@ func TestMakeEnvironmentVariables(t *testing.T) {
{Name: "POD_NODE_NAME", Value: "node-name"},
{Name: "POD_SERVICE_ACCOUNT_NAME", Value: "special"},
{Name: "POD_IP", Value: "1.2.3.4"},
{Name: "POD_IPS", Value: "1.2.3.4,fd00::6"},
{Name: "HOST_IP", Value: testKubeletHostIP},
},
},
@ -1627,8 +1653,9 @@ func TestMakeEnvironmentVariables(t *testing.T) {
},
}
podIP := "1.2.3.4"
podIPs := []string{"1.2.3.4", "fd00::6"}
result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP)
result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP, podIPs)
select {
case e := <-fakeRecorder.Events:
assert.Equal(t, tc.expectedEvent, e)

View File

@ -84,7 +84,7 @@ func TestMakeMountsWindows(t *testing.T) {
fhu := hostutil.NewFakeHostUtil(nil)
fsp := &subpath.FakeSubpath{}
mounts, _, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", "", podVolumes, fhu, fsp, nil)
mounts, _, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", []string{""}, podVolumes, fhu, fsp, nil)
expectedMounts := []kubecontainer.Mount{
{

View File

@ -92,7 +92,7 @@ func (m *kubeGenericRuntimeManager) recordContainerEvent(pod *v1.Pod, container
// * create the container
// * start the container
// * run the post start lifecycle hooks (if applicable)
func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string) (string, error) {
func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string) (string, error) {
// Step 1: pull the image.
imageRef, msg, err := m.imagePuller.EnsureImageExists(pod, container, pullSecrets, podSandboxConfig)
if err != nil {
@ -115,7 +115,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
restartCount = containerStatus.RestartCount + 1
}
containerConfig, cleanupAction, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef)
containerConfig, cleanupAction, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef, podIPs)
if cleanupAction != nil {
defer cleanupAction()
}
@ -195,8 +195,8 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
}
// generateContainerConfig generates container config for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string) (*runtimeapi.ContainerConfig, func(), error) {
opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string) (*runtimeapi.ContainerConfig, func(), error) {
opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, podIPs)
if err != nil {
return nil, nil, err
}

View File

@ -31,7 +31,7 @@ func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerInde
container := &pod.Spec.Containers[containerIndex]
podIP := ""
restartCount := 0
opts, _, _ := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
opts, _, _ := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, []string{podIP})
containerLogsPath := buildContainerLogsPath(container.Name, restartCount)
restartCountUint32 := uint32(restartCount)
envs := make([]*runtimeapi.KeyValue, len(opts.Envs))
@ -89,7 +89,7 @@ func TestGenerateContainerConfig(t *testing.T) {
}
expectedConfig := makeExpectedConfig(m, pod, 0)
containerConfig, _, err := m.generateContainerConfig(&pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image)
containerConfig, _, err := m.generateContainerConfig(&pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, []string{})
assert.NoError(t, err)
assert.Equal(t, expectedConfig, containerConfig, "generate container config for kubelet runtime v1.")
assert.Equal(t, runAsUser, containerConfig.GetLinux().GetSecurityContext().GetRunAsUser().GetValue(), "RunAsUser should be set")
@ -120,7 +120,7 @@ func TestGenerateContainerConfig(t *testing.T) {
},
}
_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image)
_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{})
assert.Error(t, err)
imageID, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
@ -132,6 +132,6 @@ func TestGenerateContainerConfig(t *testing.T) {
podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsUser = nil
podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsNonRoot = &runAsNonRootTrue
_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image)
_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{})
assert.Error(t, err, "RunAsNonRoot should fail for non-numeric username")
}

View File

@ -325,13 +325,12 @@ func TestLifeCycleHook(t *testing.T) {
}
// Now try to create a container, which should in turn invoke PostStart Hook
_, err := m.startContainer(fakeSandBox.Id, fakeSandBoxConfig, testContainer, testPod, fakePodStatus, nil, "")
_, err := m.startContainer(fakeSandBox.Id, fakeSandBoxConfig, testContainer, testPod, fakePodStatus, nil, "", []string{})
if err != nil {
t.Errorf("startContainer error =%v", err)
}
if fakeRunner.Cmd[0] != cmdPostStart.PostStart.Exec.Command[0] {
t.Errorf("CMD PostStart hook was not invoked")
}
})
}

View File

@ -699,12 +699,13 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
// by container garbage collector.
m.pruneInitContainersBeforeStart(pod, podStatus)
// We pass the value of the PRIMARY podIP down to generatePodSandboxConfig and
// generateContainerConfig, which in turn passes it to various other
// functions, in order to facilitate functionality that requires this
// value (hosts file and downward API) and avoid races determining
// We pass the value of the PRIMARY podIP and the list of podIPs down to
// generatePodSandboxConfig and generateContainerConfig, which in turn
// pass them to various other functions, in order to facilitate functionality
// that requires these values (hosts file and downward API) and avoid races determining
// the pod IP in cases where a container requires restart but the
// podIP isn't in the status manager yet.
// podIP isn't in the status manager yet. The list of podIPs is used to
// generate the hosts file.
//
// We default to the IPs in the passed-in pod status, and overwrite them if the
// sandbox needs to be (re)started.
@ -790,7 +791,8 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
}
klog.V(4).Infof("Creating %v %+v in pod %v", typeName, container, format.Pod(pod))
if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil {
// NOTE (aramase) podIPs is populated for both single-stack and dual-stack clusters. Send only podIPs.
if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, podIPs); err != nil {
startContainerResult.Fail(err, msg)
// known errors that are logged in other places are logged at higher levels here to avoid
// repetitive log spam
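Not shown in this hunk, but useful for reading the call: the PRIMARY podIP is conventionally the first entry of podIPs. A sketch of that assumed relationship (the helper name is invented for illustration and is not part of this PR):

package main

import "fmt"

// primaryPodIP is an illustrative helper capturing the assumption that the
// primary pod IP is simply the first entry of podIPs (empty if none yet).
func primaryPodIP(podIPs []string) string {
	if len(podIPs) == 0 {
		return ""
	}
	return podIPs[0]
}

func main() {
	fmt.Println(primaryPodIP([]string{"10.244.0.6", "fd00::6"})) // 10.244.0.6
}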

View File

@ -157,7 +157,7 @@ func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template cont
sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt)
assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template)
containerConfig, _, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image)
containerConfig, _, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image, []string{})
assert.NoError(t, err, "generateContainerConfig for container template %+v", template)
podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)

View File

@ -1142,7 +1142,7 @@ message EnvVar {
// EnvVarSource represents a source for the value of an EnvVar.
message EnvVarSource {
// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
// +optional
optional ObjectFieldSelector fieldRef = 1;

View File

@ -1846,7 +1846,7 @@ type EnvVar struct {
// EnvVarSource represents a source for the value of an EnvVar.
type EnvVarSource struct {
// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
// +optional
FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
// Selects a resource of the container: only resources limits and requests

View File

@ -566,7 +566,7 @@ func (EnvVar) SwaggerDoc() map[string]string {
var map_EnvVarSource = map[string]string{
"": "EnvVarSource represents a source for the value of an EnvVar.",
"fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.",
"fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.",
"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
"configMapKeyRef": "Selects a key of a ConfigMap.",
"secretKeyRef": "Selects a key of a secret in the pod's namespace",