Add namespace targeting to the kubelet

commit 9a6d50cb2a
parent 89714227ff
pkg/kubelet/kuberuntime/kuberuntime_container.go
@@ -41,7 +41,9 @@ import (
 	kubetypes "k8s.io/apimachinery/pkg/types"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
+	"k8s.io/kubernetes/pkg/features"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/types"
@@ -86,13 +88,52 @@ func (m *kubeGenericRuntimeManager) recordContainerEvent(pod *v1.Pod, container
 	m.recorder.Event(ref, eventType, reason, eventMessage)
 }
 
+// startSpec wraps the spec required to start a container, either a regular/init container
+// or an ephemeral container. Ephemeral containers contain all the fields of regular/init
+// containers, plus some additional fields. In both cases startSpec.container will be set.
+type startSpec struct {
+	container          *v1.Container
+	ephemeralContainer *v1.EphemeralContainer
+}
+
+func containerStartSpec(c *v1.Container) *startSpec {
+	return &startSpec{container: c}
+}
+
+func ephemeralContainerStartSpec(ec *v1.EphemeralContainer) *startSpec {
+	return &startSpec{
+		container:          (*v1.Container)(&ec.EphemeralContainerCommon),
+		ephemeralContainer: ec,
+	}
+}
+
+// getTargetID returns the kubecontainer.ContainerID for ephemeral container namespace
+// targeting. The target is stored as EphemeralContainer.TargetContainerName, which must be
+// resolved to a ContainerID using podStatus. The target container must already exist, which
+// usually isn't a problem since ephemeral containers aren't allowed at pod creation time.
+// This always returns nil when the EphemeralContainers feature is disabled.
+func (s *startSpec) getTargetID(podStatus *kubecontainer.PodStatus) (*kubecontainer.ContainerID, error) {
+	if s.ephemeralContainer == nil || s.ephemeralContainer.TargetContainerName == "" || !utilfeature.DefaultFeatureGate.Enabled(features.EphemeralContainers) {
+		return nil, nil
+	}
+
+	targetStatus := podStatus.FindContainerStatusByName(s.ephemeralContainer.TargetContainerName)
+	if targetStatus == nil {
+		return nil, fmt.Errorf("unable to find target container %v", s.ephemeralContainer.TargetContainerName)
+	}
+
+	return &targetStatus.ID, nil
+}
+
 // startContainer starts a container and returns a message indicating why it failed on error.
 // It starts the container through the following steps:
 // * pull the image
 // * create the container
 // * start the container
 // * run the post start lifecycle hooks (if applicable)
-func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string) (string, error) {
+func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, spec *startSpec, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string) (string, error) {
+	container := spec.container
+
 	// Step 1: pull the image.
 	imageRef, msg, err := m.imagePuller.EnsureImageExists(pod, container, pullSecrets, podSandboxConfig)
 	if err != nil {
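To make the new flow concrete, here is a minimal, self-contained Go sketch of what getTargetID does. The types are simplified stand-ins for the kubelet's kubecontainer package, the feature-gate check is omitted, and the names and IDs are made-up examples; treat it as an illustration of the lookup, not the kubelet's actual code.

package main

import "fmt"

// Simplified stand-ins for kubecontainer.ContainerID, ContainerStatus and
// PodStatus; the real types live in k8s.io/kubernetes/pkg/kubelet/container.
type ContainerID struct{ Type, ID string }

type ContainerStatus struct {
	ID   ContainerID
	Name string
}

type PodStatus struct{ ContainerStatuses []*ContainerStatus }

func (ps *PodStatus) findContainerStatusByName(name string) *ContainerStatus {
	for _, s := range ps.ContainerStatuses {
		if s.Name == name {
			return s
		}
	}
	return nil
}

// getTargetID mirrors startSpec.getTargetID above, minus the feature-gate check:
// an empty target name means "not an ephemeral container with a target", and a
// named target with no status yet is an error.
func getTargetID(ps *PodStatus, targetContainerName string) (*ContainerID, error) {
	if targetContainerName == "" {
		return nil, nil
	}
	s := ps.findContainerStatusByName(targetContainerName)
	if s == nil {
		return nil, fmt.Errorf("unable to find target container %v", targetContainerName)
	}
	return &s.ID, nil
}

func main() {
	ps := &PodStatus{ContainerStatuses: []*ContainerStatus{
		{Name: "app", ID: ContainerID{Type: "docker", ID: "abc123"}},
	}}
	id, err := getTargetID(ps, "app")
	fmt.Println(id, err) // prints: &{docker abc123} <nil>
}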
@@ -115,7 +156,14 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
 		restartCount = containerStatus.RestartCount + 1
 	}
 
-	containerConfig, cleanupAction, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef, podIPs)
+	target, err := spec.getTargetID(podStatus)
+	if err != nil {
+		s, _ := grpcstatus.FromError(err)
+		m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
+		return s.Message(), ErrCreateContainerConfig
+	}
+
+	containerConfig, cleanupAction, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef, podIPs, target)
 	if cleanupAction != nil {
 		defer cleanupAction()
 	}
@@ -195,7 +243,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
 }
 
 // generateContainerConfig generates container config for kubelet runtime v1.
-func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string) (*runtimeapi.ContainerConfig, func(), error) {
+func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string, nsTarget *kubecontainer.ContainerID) (*runtimeapi.ContainerConfig, func(), error) {
 	opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, podIPs)
 	if err != nil {
 		return nil, nil, err
@@ -239,7 +287,7 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
 	}
 
 	// set platform specific configurations.
-	if err := m.applyPlatformSpecificContainerConfig(config, container, pod, uid, username); err != nil {
+	if err := m.applyPlatformSpecificContainerConfig(config, container, pod, uid, username, nsTarget); err != nil {
 		return nil, cleanupAction, err
 	}
 
pkg/kubelet/kuberuntime/kuberuntime_container_linux.go
@@ -28,22 +28,28 @@ import (
 	"k8s.io/klog"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/qos"
 )
 
 // applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig.
-func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string) error {
-	config.Linux = m.generateLinuxContainerConfig(container, pod, uid, username)
+func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID) error {
+	config.Linux = m.generateLinuxContainerConfig(container, pod, uid, username, nsTarget)
 	return nil
 }
 
 // generateLinuxContainerConfig generates linux container config for kubelet runtime v1.
-func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string) *runtimeapi.LinuxContainerConfig {
+func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID) *runtimeapi.LinuxContainerConfig {
 	lc := &runtimeapi.LinuxContainerConfig{
 		Resources:       &runtimeapi.LinuxContainerResources{},
 		SecurityContext: m.determineEffectiveSecurityContext(pod, container, uid, username),
 	}
 
+	if nsTarget != nil && lc.SecurityContext.NamespaceOptions.Pid == runtimeapi.NamespaceMode_CONTAINER {
+		lc.SecurityContext.NamespaceOptions.Pid = runtimeapi.NamespaceMode_TARGET
+		lc.SecurityContext.NamespaceOptions.TargetId = nsTarget.ID
+	}
+
 	// set linux container resources
 	var cpuShares int64
 	cpuRequest := container.Resources.Requests.Cpu()
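The branch added above is the heart of the change and is easy to miss among the signature threading: a target only overrides the PID namespace when the container would otherwise get its own (CONTAINER mode); POD- and NODE-scoped PID namespaces are left alone. Below is a hedged, standalone sketch of just that decision, with NamespaceMode values copied from the CRI v1alpha2 enum further down in this diff and a made-up container ID:

package main

import "fmt"

// NamespaceMode values as defined by the CRI v1alpha2 enum in this diff.
type NamespaceMode int32

const (
	NamespaceMode_POD       NamespaceMode = 0
	NamespaceMode_CONTAINER NamespaceMode = 1
	NamespaceMode_NODE      NamespaceMode = 2
	NamespaceMode_TARGET    NamespaceMode = 3
)

// pidNamespace reproduces the new branch in generateLinuxContainerConfig:
// retarget only when the container would otherwise be CONTAINER-scoped.
func pidNamespace(current NamespaceMode, targetID string) (NamespaceMode, string) {
	if targetID != "" && current == NamespaceMode_CONTAINER {
		return NamespaceMode_TARGET, targetID
	}
	return current, "" // POD and NODE modes are never retargeted
}

func main() {
	fmt.Println(pidNamespace(NamespaceMode_CONTAINER, "abc123")) // prints: 3 abc123 (TARGET)
	fmt.Println(pidNamespace(NamespaceMode_POD, "abc123"))       // prints: 0 (POD, unchanged)
}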
pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go
@@ -22,13 +22,17 @@ import (
 	"reflect"
 	"testing"
 
+	"github.com/google/go-cmp/cmp"
 	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
-
 	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
+	"k8s.io/kubernetes/pkg/features"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 )
 
 func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerIndex int) *runtimeapi.ContainerConfig {
@@ -57,7 +61,7 @@ func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerInde
 		Stdin:     container.Stdin,
 		StdinOnce: container.StdinOnce,
 		Tty:       container.TTY,
-		Linux:     m.generateLinuxContainerConfig(container, pod, new(int64), ""),
+		Linux:     m.generateLinuxContainerConfig(container, pod, new(int64), "", nil),
 		Envs:      envs,
 	}
 	return expectedConfig
@@ -93,7 +97,7 @@ func TestGenerateContainerConfig(t *testing.T) {
 	}
 
 	expectedConfig := makeExpectedConfig(m, pod, 0)
-	containerConfig, _, err := m.generateContainerConfig(&pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, []string{})
+	containerConfig, _, err := m.generateContainerConfig(&pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, []string{}, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, expectedConfig, containerConfig, "generate container config for kubelet runtime v1.")
 	assert.Equal(t, runAsUser, containerConfig.GetLinux().GetSecurityContext().GetRunAsUser().GetValue(), "RunAsUser should be set")
@@ -124,7 +128,7 @@ func TestGenerateContainerConfig(t *testing.T) {
 		},
 	}
 
-	_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{})
+	_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil)
 	assert.Error(t, err)
 
 	imageID, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
@@ -136,7 +140,7 @@ func TestGenerateContainerConfig(t *testing.T) {
 	podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsUser = nil
 	podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsNonRoot = &runAsNonRootTrue
 
-	_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{})
+	_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil)
 	assert.Error(t, err, "RunAsNonRoot should fail for non-numeric username")
 }
 
@@ -296,3 +300,70 @@ func TestGetHugepageLimitsFromResources(t *testing.T) {
 		}
 	}
 }
+
+func TestGenerateLinuxContainerConfigNamespaces(t *testing.T) {
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EphemeralContainers, true)()
+	_, _, m, err := createTestRuntimeManager()
+	if err != nil {
+		t.Fatalf("error creating test RuntimeManager: %v", err)
+	}
+
+	for _, tc := range []struct {
+		name   string
+		pod    *v1.Pod
+		target *kubecontainer.ContainerID
+		want   *runtimeapi.NamespaceOption
+	}{
+		{
+			"Default namespaces",
+			&v1.Pod{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{Name: "test"},
+					},
+				},
+			},
+			nil,
+			&runtimeapi.NamespaceOption{
+				Pid: runtimeapi.NamespaceMode_CONTAINER,
+			},
+		},
+		{
+			"PID Namespace POD",
+			&v1.Pod{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{Name: "test"},
+					},
+					ShareProcessNamespace: &[]bool{true}[0],
+				},
+			},
+			nil,
+			&runtimeapi.NamespaceOption{
+				Pid: runtimeapi.NamespaceMode_POD,
+			},
+		},
+		{
+			"PID Namespace TARGET",
+			&v1.Pod{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{Name: "test"},
+					},
+				},
+			},
+			&kubecontainer.ContainerID{Type: "docker", ID: "really-long-id-string"},
+			&runtimeapi.NamespaceOption{
+				Pid:      runtimeapi.NamespaceMode_TARGET,
+				TargetId: "really-long-id-string",
+			},
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			got := m.generateLinuxContainerConfig(&tc.pod.Spec.Containers[0], tc.pod, nil, "", tc.target)
+			if diff := cmp.Diff(tc.want, got.SecurityContext.NamespaceOptions); diff != "" {
+				t.Errorf("%v: diff (-want +got):\n%v", t.Name(), diff)
+			}
+		})
+	}
+}
pkg/kubelet/kuberuntime/kuberuntime_container_test.go
@@ -17,17 +17,22 @@ limitations under the License.
 package kuberuntime
 
 import (
+	"fmt"
 	"path/filepath"
 	"strings"
 	"testing"
 	"time"
 
+	"github.com/google/go-cmp/cmp"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
+	"k8s.io/kubernetes/pkg/features"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
@@ -325,7 +330,7 @@ func TestLifeCycleHook(t *testing.T) {
 	}
 
 	// Now try to create a container, which should in turn invoke PostStart Hook
-	_, err := m.startContainer(fakeSandBox.Id, fakeSandBoxConfig, testContainer, testPod, fakePodStatus, nil, "", []string{})
+	_, err := m.startContainer(fakeSandBox.Id, fakeSandBoxConfig, containerStartSpec(testContainer), testPod, fakePodStatus, nil, "", []string{})
 	if err != nil {
 		t.Errorf("startContainer error =%v", err)
 	}
@@ -334,3 +339,72 @@ func TestLifeCycleHook(t *testing.T) {
 		}
 	})
 }
+
+func TestStartSpec(t *testing.T) {
+	podStatus := &kubecontainer.PodStatus{
+		ContainerStatuses: []*kubecontainer.ContainerStatus{
+			{
+				ID: kubecontainer.ContainerID{
+					Type: "docker",
+					ID:   "docker-something-something",
+				},
+				Name: "target",
+			},
+		},
+	}
+
+	for _, tc := range []struct {
+		name string
+		spec *startSpec
+		want *kubecontainer.ContainerID
+	}{
+		{
+			"Regular Container",
+			containerStartSpec(&v1.Container{
+				Name: "test",
+			}),
+			nil,
+		},
+		{
+			"Ephemeral Container w/o Target",
+			ephemeralContainerStartSpec(&v1.EphemeralContainer{
+				EphemeralContainerCommon: v1.EphemeralContainerCommon{
+					Name: "test",
+				},
+			}),
+			nil,
+		},
+		{
+			"Ephemeral Container w/ Target",
+			ephemeralContainerStartSpec(&v1.EphemeralContainer{
+				EphemeralContainerCommon: v1.EphemeralContainerCommon{
+					Name: "test",
+				},
+				TargetContainerName: "target",
+			}),
+			&kubecontainer.ContainerID{
+				Type: "docker",
+				ID:   "docker-something-something",
+			},
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EphemeralContainers, true)()
+			if got, err := tc.spec.getTargetID(podStatus); err != nil {
+				t.Fatalf("%v: getTargetID got unexpected error: %v", t.Name(), err)
+			} else if diff := cmp.Diff(tc.want, got); diff != "" {
+				t.Errorf("%v: getTargetID got unexpected result. diff:\n%v", t.Name(), diff)
+			}
+		})
+
+		// Test with feature disabled in self-contained section which can be removed when feature flag is removed.
+		t.Run(fmt.Sprintf("%s (disabled)", tc.name), func(t *testing.T) {
+			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EphemeralContainers, false)()
+			if got, err := tc.spec.getTargetID(podStatus); err != nil {
+				t.Fatalf("%v: getTargetID got unexpected error: %v", t.Name(), err)
+			} else if got != nil {
+				t.Errorf("%v: getTargetID got: %v, wanted nil", t.Name(), got)
+			}
+		})
+	}
+}
pkg/kubelet/kuberuntime/kuberuntime_container_unsupported.go
@@ -21,9 +21,10 @@ package kuberuntime
 import (
 	"k8s.io/api/core/v1"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 )
 
 // applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig.
-func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string) error {
+func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID) error {
 	return nil
 }
pkg/kubelet/kuberuntime/kuberuntime_container_windows.go
@@ -20,6 +20,7 @@ package kuberuntime
 
 import (
 	"fmt"
 
 	"github.com/docker/docker/pkg/sysinfo"
+
 	"k8s.io/api/core/v1"
@@ -27,11 +28,12 @@ import (
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/securitycontext"
 )
 
 // applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig.
-func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string) error {
+func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, _ *kubecontainer.ContainerID) error {
 	windowsConfig, err := m.generateWindowsContainerConfig(container, pod, uid, username)
 	if err != nil {
 		return err
pkg/kubelet/kuberuntime/kuberuntime_manager.go
@@ -777,20 +777,20 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
 	// Helper containing boilerplate common to starting all types of containers.
 	// typeName is a label used to describe this type of container in log messages,
 	// currently: "container", "init container" or "ephemeral container"
-	start := func(typeName string, container *v1.Container) error {
-		startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
+	start := func(typeName string, spec *startSpec) error {
+		startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, spec.container.Name)
 		result.AddSyncResult(startContainerResult)
 
-		isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff)
+		isInBackOff, msg, err := m.doBackOff(pod, spec.container, podStatus, backOff)
 		if isInBackOff {
 			startContainerResult.Fail(err, msg)
-			klog.V(4).Infof("Backing Off restarting %v %+v in pod %v", typeName, container, format.Pod(pod))
+			klog.V(4).Infof("Backing Off restarting %v %+v in pod %v", typeName, spec.container, format.Pod(pod))
 			return err
 		}
 
-		klog.V(4).Infof("Creating %v %+v in pod %v", typeName, container, format.Pod(pod))
+		klog.V(4).Infof("Creating %v %+v in pod %v", typeName, spec.container, format.Pod(pod))
 		// NOTE (aramase) podIPs are populated for single stack and dual stack clusters. Send only podIPs.
-		if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, podIPs); err != nil {
+		if msg, err := m.startContainer(podSandboxID, podSandboxConfig, spec, pod, podStatus, pullSecrets, podIP, podIPs); err != nil {
 			startContainerResult.Fail(err, msg)
 			// known errors that are logged in other places are logged at higher levels here to avoid
 			// repetitive log spam
@@ -812,15 +812,14 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
 	// containers cannot be specified on pod creation.
 	if utilfeature.DefaultFeatureGate.Enabled(features.EphemeralContainers) {
 		for _, idx := range podContainerChanges.EphemeralContainersToStart {
-			c := (*v1.Container)(&pod.Spec.EphemeralContainers[idx].EphemeralContainerCommon)
-			start("ephemeral container", c)
+			start("ephemeral container", ephemeralContainerStartSpec(&pod.Spec.EphemeralContainers[idx]))
 		}
 	}
 
 	// Step 6: start the init container.
 	if container := podContainerChanges.NextInitContainerToStart; container != nil {
 		// Start the next init container.
-		if err := start("init container", container); err != nil {
+		if err := start("init container", containerStartSpec(container)); err != nil {
 			return
 		}
 
@@ -830,7 +829,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
 
 	// Step 7: start containers in podContainerChanges.ContainersToStart.
 	for _, idx := range podContainerChanges.ContainersToStart {
-		start("container", &pod.Spec.Containers[idx])
+		start("container", containerStartSpec(&pod.Spec.Containers[idx]))
 	}
 
 	return
pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
@@ -27,7 +27,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -157,7 +157,7 @@ func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template cont
 	sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt)
 	assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template)
 
-	containerConfig, _, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image, []string{})
+	containerConfig, _, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image, []string{}, nil)
 	assert.NoError(t, err, "generateContainerConfig for container template %+v", template)
 
 	podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)
staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.proto
@@ -216,6 +216,13 @@ enum NamespaceMode {
     // For example, a container with a PID namespace of NODE expects to view
     // all of the processes on the host running the kubelet.
    NODE = 2;
+    // TARGET targets the namespace of another container. When this is specified,
+    // a target_id must be specified in NamespaceOption and refer to a container
+    // previously created with NamespaceMode CONTAINER. This container's namespace
+    // will be made to match that of container target_id.
+    // For example, a container with a PID namespace of TARGET expects to view
+    // all of the processes that container target_id can view.
+    TARGET = 3;
 }
 
 // NamespaceOption provides options for Linux namespaces.
@@ -227,12 +234,16 @@ message NamespaceOption {
     // PID namespace for this container/sandbox.
     // Note: The CRI default is POD, but the v1.PodSpec default is CONTAINER.
     // The kubelet's runtime manager will set this to CONTAINER explicitly for v1 pods.
-    // Namespaces currently set by the kubelet: POD, CONTAINER, NODE
+    // Namespaces currently set by the kubelet: POD, CONTAINER, NODE, TARGET
    NamespaceMode pid = 2;
     // IPC namespace for this container/sandbox.
     // Note: There is currently no way to set CONTAINER scoped IPC in the Kubernetes API.
     // Namespaces currently set by the kubelet: POD, NODE
    NamespaceMode ipc = 3;
+    // Target Container ID for NamespaceMode of TARGET. This container must have been
+    // previously created in the same pod. It is not possible to specify different targets
+    // for each namespace.
+    string target_id = 4;
 }
 
 // Int64Value is the wrapper of int64.
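Putting the CRI pieces together: assuming a version of the k8s.io/cri-api module that includes this change, the generated Go types gain NamespaceMode_TARGET and NamespaceOption.TargetId, and the kubelet's container config for a targeting ephemeral container would carry options like the sketch below (the container ID "abc123" is a made-up example):

package main

import (
	"fmt"

	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)

func main() {
	// For a pod without host networking, network and IPC remain pod-scoped;
	// only the PID namespace is retargeted at another container.
	nsOpts := &runtimeapi.NamespaceOption{
		Network:  runtimeapi.NamespaceMode_POD,
		Pid:      runtimeapi.NamespaceMode_TARGET,
		Ipc:      runtimeapi.NamespaceMode_POD,
		TargetId: "abc123",
	}
	fmt.Println(nsOpts)
}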