Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-19 01:40:13 +00:00

Merge pull request #111358 from ddebroy/hasnet1

Introduce PodHasNetwork condition for pods

Commit: 2e1a4da8df
@@ -629,6 +629,13 @@ const (
 	// Enables controlling pod ranking on replicaset scale-down.
 	PodDeletionCost featuregate.Feature = "PodDeletionCost"

+	// owner: @ddebroy
+	// alpha: v1.25
+	//
+	// Enables reporting of PodHasNetwork condition in pod status after pod
+	// sandbox creation and network configuration completes successfully
+	PodHasNetworkCondition featuregate.Feature = "PodHasNetworkCondition"
+
 	// owner: @egernst
 	// alpha: v1.16
 	// beta: v1.18
@@ -998,6 +1005,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	PodDeletionCost: {Default: true, PreRelease: featuregate.Beta},

+	PodHasNetworkCondition: {Default: false, PreRelease: featuregate.Alpha},
+
 	PodOverhead: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26

 	PodSecurity: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
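For orientation: kubelet code paths guard the new condition with the standard feature-gate check against the gate declared above. A minimal sketch of the pattern (the same check the generateAPIPodStatus hunk below applies), assuming the kubelet was started with --feature-gates=PodHasNetworkCondition=true:

    import (
        utilfeature "k8s.io/apiserver/pkg/util/feature"
        "k8s.io/kubernetes/pkg/features"
    )

    // Report the PodHasNetwork condition only while the alpha gate is on.
    if utilfeature.DefaultFeatureGate.Enabled(features.PodHasNetworkCondition) {
        // append the condition to the pod's API status (see generateAPIPodStatus below)
    }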
@@ -1430,14 +1430,12 @@ func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
 // internal pod status. This method should only be called from within sync*Pod methods.
 func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
 	klog.V(3).InfoS("Generating pod status", "pod", klog.KObj(pod))

 	// use the previous pod status, or the api status, as the basis for this pod
 	oldPodStatus, found := kl.statusManager.GetPodStatus(pod.UID)
 	if !found {
 		oldPodStatus = pod.Status
 	}
 	s := kl.convertStatusToAPIStatus(pod, podStatus, oldPodStatus)

 	// calculate the next phase and preserve reason
 	allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
 	s.Phase = getPhase(&pod.Spec, allStatus)

@@ -1499,6 +1497,9 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
 		}
 	}
 	// set all Kubelet-owned conditions
+	if utilfeature.DefaultFeatureGate.Enabled(features.PodHasNetworkCondition) {
+		s.Conditions = append(s.Conditions, status.GeneratePodHasNetworkCondition(pod, podStatus))
+	}
 	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(&pod.Spec, s.InitContainerStatuses, s.Phase))
 	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(&pod.Spec, s.Conditions, s.ContainerStatuses, s.Phase))
 	s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(&pod.Spec, s.ContainerStatuses, s.Phase))

@@ -1506,7 +1507,6 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
 		Type:   v1.PodScheduled,
 		Status: v1.ConditionTrue,
 	})

 	// set HostIP and initialize PodIP/PodIPs for host network pods
 	if kl.kubeClient != nil {
 		hostIPs, err := kl.getHostIPsAnyWay()
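A consumer-side sketch for reading the new condition from a pod's API status. podutil.GetPodCondition is the existing helper in k8s.io/kubernetes/pkg/api/v1/pod (it is also used by the status code in this diff); the podHasNetwork wrapper is hypothetical, and the literal condition type string matches the PodHasNetwork constant added later in this diff:

    import (
        v1 "k8s.io/api/core/v1"
        podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    )

    // podHasNetwork reports whether the kubelet has observed a ready sandbox
    // with networking configured for the pod.
    func podHasNetwork(pod *v1.Pod) bool {
        _, cond := podutil.GetPodCondition(&pod.Status, v1.PodConditionType("PodHasNetwork"))
        return cond != nil && cond.Status == v1.ConditionTrue
    }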
@@ -46,7 +46,11 @@ import (
 	// api.Registry.GroupOrDie(v1.GroupName).GroupVersions[0].String() is changed
 	// to "v1"?

+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
+	"k8s.io/kubernetes/pkg/features"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 	"k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward"
@@ -2478,18 +2482,30 @@ func Test_generateAPIPodStatus(t *testing.T) {
 		},
 		RestartPolicy: v1.RestartPolicyAlways,
 	}
+	sandboxReadyStatus := &kubecontainer.PodStatus{
+		SandboxStatuses: []*runtimeapi.PodSandboxStatus{
+			{
+				Network: &runtimeapi.PodSandboxNetworkStatus{
+					Ip: "10.0.0.10",
+				},
+				Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(0)},
+				State:    runtimeapi.PodSandboxState_SANDBOX_READY,
+			},
+		},
+	}
 	now := metav1.Now()

 	tests := []struct {
-		name             string
-		pod              *v1.Pod
-		currentStatus    *kubecontainer.PodStatus
-		unreadyContainer []string
-		previousStatus   v1.PodStatus
-		expected         v1.PodStatus
+		name                           string
+		pod                            *v1.Pod
+		currentStatus                  *kubecontainer.PodStatus
+		unreadyContainer               []string
+		previousStatus                 v1.PodStatus
+		expected                       v1.PodStatus
+		expectedPodHasNetworkCondition v1.PodCondition
 	}{
 		{
-			name: "no current status, with previous statuses and deletion",
+			name: "current status ready, with previous statuses and deletion",
 			pod: &v1.Pod{
 				Spec: desiredState,
 				Status: v1.PodStatus{
@@ -2500,7 +2516,7 @@ func Test_generateAPIPodStatus(t *testing.T) {
 				},
 				ObjectMeta: metav1.ObjectMeta{Name: "my-pod", DeletionTimestamp: &now},
 			},
-			currentStatus: &kubecontainer.PodStatus{},
+			currentStatus: sandboxReadyStatus,
 			previousStatus: v1.PodStatus{
 				ContainerStatuses: []v1.ContainerStatus{
 					runningState("containerA"),
@@ -2522,9 +2538,13 @@ func Test_generateAPIPodStatus(t *testing.T) {
 					ready(waitingWithLastTerminationUnknown("containerB", 0)),
 				},
 			},
+			expectedPodHasNetworkCondition: v1.PodCondition{
+				Type:   kubetypes.PodHasNetwork,
+				Status: v1.ConditionTrue,
+			},
 		},
 		{
-			name: "no current status, with previous statuses and no deletion",
+			name: "current status ready, with previous statuses and no deletion",
 			pod: &v1.Pod{
 				Spec: desiredState,
 				Status: v1.PodStatus{
@@ -2534,7 +2554,7 @@ func Test_generateAPIPodStatus(t *testing.T) {
 					},
 				},
 			},
-			currentStatus: &kubecontainer.PodStatus{},
+			currentStatus: sandboxReadyStatus,
 			previousStatus: v1.PodStatus{
 				ContainerStatuses: []v1.ContainerStatus{
 					runningState("containerA"),
@@ -2556,6 +2576,10 @@ func Test_generateAPIPodStatus(t *testing.T) {
 					ready(waitingWithLastTerminationUnknown("containerB", 1)),
 				},
 			},
+			expectedPodHasNetworkCondition: v1.PodCondition{
+				Type:   kubetypes.PodHasNetwork,
+				Status: v1.ConditionTrue,
+			},
 		},
 		{
 			name: "terminal phase cannot be changed (apiserver previous is succeeded)",
@@ -2591,6 +2615,10 @@ func Test_generateAPIPodStatus(t *testing.T) {
 					ready(waitingWithLastTerminationUnknown("containerB", 1)),
 				},
 			},
+			expectedPodHasNetworkCondition: v1.PodCondition{
+				Type:   kubetypes.PodHasNetwork,
+				Status: v1.ConditionFalse,
+			},
 		},
 		{
 			name: "terminal phase from previous status must remain terminal, restartAlways",
@@ -2632,6 +2660,10 @@ func Test_generateAPIPodStatus(t *testing.T) {
 				Reason:  "Test",
 				Message: "test",
 			},
+			expectedPodHasNetworkCondition: v1.PodCondition{
+				Type:   kubetypes.PodHasNetwork,
+				Status: v1.ConditionFalse,
+			},
 		},
 		{
 			name: "terminal phase from previous status must remain terminal, restartNever",
@@ -2680,6 +2712,10 @@ func Test_generateAPIPodStatus(t *testing.T) {
 				Reason:  "Test",
 				Message: "test",
 			},
+			expectedPodHasNetworkCondition: v1.PodCondition{
+				Type:   kubetypes.PodHasNetwork,
+				Status: v1.ConditionFalse,
+			},
 		},
 		{
 			name: "running can revert to pending",
@@ -2693,7 +2729,7 @@ func Test_generateAPIPodStatus(t *testing.T) {
 					},
 				},
 			},
-			currentStatus: &kubecontainer.PodStatus{},
+			currentStatus: sandboxReadyStatus,
 			previousStatus: v1.PodStatus{
 				ContainerStatuses: []v1.ContainerStatus{
 					waitingState("containerA"),
@@ -2715,6 +2751,10 @@ func Test_generateAPIPodStatus(t *testing.T) {
 					ready(waitingStateWithReason("containerB", "ContainerCreating")),
 				},
 			},
+			expectedPodHasNetworkCondition: v1.PodCondition{
+				Type:   kubetypes.PodHasNetwork,
+				Status: v1.ConditionTrue,
+			},
 		},
 		{
 			name: "reason and message are preserved when phase doesn't change",
@@ -2729,6 +2769,7 @@ func Test_generateAPIPodStatus(t *testing.T) {
 				},
 			},
 			currentStatus: &kubecontainer.PodStatus{
+				SandboxStatuses: sandboxReadyStatus.SandboxStatuses,
 				ContainerStatuses: []*kubecontainer.Status{
 					{
 						ID: kubecontainer.ContainerID{ID: "foo"},
@@ -2764,6 +2805,10 @@ func Test_generateAPIPodStatus(t *testing.T) {
 					ready(withID(runningStateWithStartedAt("containerB", time.Unix(1, 0).UTC()), "://foo")),
 				},
 			},
+			expectedPodHasNetworkCondition: v1.PodCondition{
+				Type:   kubetypes.PodHasNetwork,
+				Status: v1.ConditionTrue,
+			},
 		},
 		{
 			name: "reason and message are cleared when phase changes",
@@ -2778,6 +2823,7 @@ func Test_generateAPIPodStatus(t *testing.T) {
 				},
 			},
 			currentStatus: &kubecontainer.PodStatus{
+				SandboxStatuses: sandboxReadyStatus.SandboxStatuses,
 				ContainerStatuses: []*kubecontainer.Status{
 					{
 						ID: kubecontainer.ContainerID{ID: "c1"},
@@ -2817,22 +2863,32 @@ func Test_generateAPIPodStatus(t *testing.T) {
 					ready(withID(runningStateWithStartedAt("containerB", time.Unix(2, 0).UTC()), "://c2")),
 				},
 			},
+			expectedPodHasNetworkCondition: v1.PodCondition{
+				Type:   kubetypes.PodHasNetwork,
+				Status: v1.ConditionTrue,
+			},
 		},
 	}
 	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
-			defer testKubelet.Cleanup()
-			kl := testKubelet.kubelet
-			kl.statusManager.SetPodStatus(test.pod, test.previousStatus)
-			for _, name := range test.unreadyContainer {
-				kl.readinessManager.Set(kubecontainer.BuildContainerID("", findContainerStatusByName(test.expected, name).ContainerID), results.Failure, test.pod)
-			}
-			actual := kl.generateAPIPodStatus(test.pod, test.currentStatus)
-			if !apiequality.Semantic.DeepEqual(test.expected, actual) {
-				t.Fatalf("Unexpected status: %s", diff.ObjectReflectDiff(actual, test.expected))
-			}
-		})
+		for _, enablePodHasNetworkCondition := range []bool{false, true} {
+			t.Run(test.name, func(t *testing.T) {
+				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodHasNetworkCondition, enablePodHasNetworkCondition)()
+				testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
+				defer testKubelet.Cleanup()
+				kl := testKubelet.kubelet
+				kl.statusManager.SetPodStatus(test.pod, test.previousStatus)
+				for _, name := range test.unreadyContainer {
+					kl.readinessManager.Set(kubecontainer.BuildContainerID("", findContainerStatusByName(test.expected, name).ContainerID), results.Failure, test.pod)
+				}
+				actual := kl.generateAPIPodStatus(test.pod, test.currentStatus)
+				if enablePodHasNetworkCondition {
+					test.expected.Conditions = append([]v1.PodCondition{test.expectedPodHasNetworkCondition}, test.expected.Conditions...)
+				}
+				if !apiequality.Semantic.DeepEqual(test.expected, actual) {
+					t.Fatalf("Unexpected status: %s", diff.ObjectReflectDiff(actual, test.expected))
+				}
+			})
+		}
 	}
 }
@@ -303,40 +303,3 @@ func (m *kubeGenericRuntimeManager) getSeccompProfile(annotations map[string]str
 		ProfileType: runtimeapi.SecurityProfile_Unconfined,
 	}
 }
-
-func ipcNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode {
-	if pod != nil && pod.Spec.HostIPC {
-		return runtimeapi.NamespaceMode_NODE
-	}
-	return runtimeapi.NamespaceMode_POD
-}
-
-func networkNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode {
-	if pod != nil && pod.Spec.HostNetwork {
-		return runtimeapi.NamespaceMode_NODE
-	}
-	return runtimeapi.NamespaceMode_POD
-}
-
-func pidNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode {
-	if pod != nil {
-		if pod.Spec.HostPID {
-			return runtimeapi.NamespaceMode_NODE
-		}
-		if pod.Spec.ShareProcessNamespace != nil && *pod.Spec.ShareProcessNamespace {
-			return runtimeapi.NamespaceMode_POD
-		}
-	}
-	// Note that PID does not default to the zero value for v1.Pod
-	return runtimeapi.NamespaceMode_CONTAINER
-}
-
-// namespacesForPod returns the runtimeapi.NamespaceOption for a given pod.
-// An empty or nil pod can be used to get the namespace defaults for v1.Pod.
-func namespacesForPod(pod *v1.Pod) *runtimeapi.NamespaceOption {
-	return &runtimeapi.NamespaceOption{
-		Ipc:     ipcNamespaceForPod(pod),
-		Network: networkNamespaceForPod(pod),
-		Pid:     pidNamespaceForPod(pod),
-	}
-}
@@ -689,69 +689,3 @@ func TestGetSeccompProfileDefaultSeccomp(t *testing.T) {
 func getLocal(v string) *string {
 	return &v
 }
-
-func TestNamespacesForPod(t *testing.T) {
-	for desc, test := range map[string]struct {
-		input    *v1.Pod
-		expected *runtimeapi.NamespaceOption
-	}{
-		"nil pod -> default v1 namespaces": {
-			nil,
-			&runtimeapi.NamespaceOption{
-				Ipc:     runtimeapi.NamespaceMode_POD,
-				Network: runtimeapi.NamespaceMode_POD,
-				Pid:     runtimeapi.NamespaceMode_CONTAINER,
-			},
-		},
-		"v1.Pod default namespaces": {
-			&v1.Pod{},
-			&runtimeapi.NamespaceOption{
-				Ipc:     runtimeapi.NamespaceMode_POD,
-				Network: runtimeapi.NamespaceMode_POD,
-				Pid:     runtimeapi.NamespaceMode_CONTAINER,
-			},
-		},
-		"Host Namespaces": {
-			&v1.Pod{
-				Spec: v1.PodSpec{
-					HostIPC:     true,
-					HostNetwork: true,
-					HostPID:     true,
-				},
-			},
-			&runtimeapi.NamespaceOption{
-				Ipc:     runtimeapi.NamespaceMode_NODE,
-				Network: runtimeapi.NamespaceMode_NODE,
-				Pid:     runtimeapi.NamespaceMode_NODE,
-			},
-		},
-		"Shared Process Namespace (feature enabled)": {
-			&v1.Pod{
-				Spec: v1.PodSpec{
-					ShareProcessNamespace: &[]bool{true}[0],
-				},
-			},
-			&runtimeapi.NamespaceOption{
-				Ipc:     runtimeapi.NamespaceMode_POD,
-				Network: runtimeapi.NamespaceMode_POD,
-				Pid:     runtimeapi.NamespaceMode_POD,
-			},
-		},
-		"Shared Process Namespace, redundant flag (feature enabled)": {
-			&v1.Pod{
-				Spec: v1.PodSpec{
-					ShareProcessNamespace: &[]bool{false}[0],
-				},
-			},
-			&runtimeapi.NamespaceOption{
-				Ipc:     runtimeapi.NamespaceMode_POD,
-				Network: runtimeapi.NamespaceMode_POD,
-				Pid:     runtimeapi.NamespaceMode_CONTAINER,
-			},
-		},
-	} {
-		t.Logf("TestCase: %s", desc)
-		actual := namespacesForPod(test.input)
-		assert.Equal(t, test.expected, actual)
-	}
-}
@@ -47,6 +47,7 @@ import (
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/images"
+	runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/logs"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"

@@ -462,48 +463,6 @@ type podActions struct {
 	EphemeralContainersToStart []int
 }

-// podSandboxChanged checks whether the spec of the pod is changed and returns
-// (changed, new attempt, original sandboxID if exist).
-func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, uint32, string) {
-	if len(podStatus.SandboxStatuses) == 0 {
-		klog.V(2).InfoS("No sandbox for pod can be found. Need to start a new one", "pod", klog.KObj(pod))
-		return true, 0, ""
-	}
-
-	readySandboxCount := 0
-	for _, s := range podStatus.SandboxStatuses {
-		if s.State == runtimeapi.PodSandboxState_SANDBOX_READY {
-			readySandboxCount++
-		}
-	}
-
-	// Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one.
-	sandboxStatus := podStatus.SandboxStatuses[0]
-	if readySandboxCount > 1 {
-		klog.V(2).InfoS("Multiple sandboxes are ready for Pod. Need to reconcile them", "pod", klog.KObj(pod))
-		return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
-	}
-	if sandboxStatus.State != runtimeapi.PodSandboxState_SANDBOX_READY {
-		klog.V(2).InfoS("No ready sandbox for pod can be found. Need to start a new one", "pod", klog.KObj(pod))
-		return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
-	}
-
-	// Needs to create a new sandbox when network namespace changed.
-	if sandboxStatus.GetLinux().GetNamespaces().GetOptions().GetNetwork() != networkNamespaceForPod(pod) {
-		klog.V(2).InfoS("Sandbox for pod has changed. Need to start a new one", "pod", klog.KObj(pod))
-		return true, sandboxStatus.Metadata.Attempt + 1, ""
-	}
-
-	// Needs to create a new sandbox when the sandbox does not have an IP address.
-	if !kubecontainer.IsHostNetworkPod(pod) && sandboxStatus.Network != nil && sandboxStatus.Network.Ip == "" {
-		klog.V(2).InfoS("Sandbox for pod has no IP address. Need to start a new one", "pod", klog.KObj(pod))
-		return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
-	}
-
-	return false, sandboxStatus.Metadata.Attempt, sandboxStatus.Id
-}
-
 func containerChanged(container *v1.Container, containerStatus *kubecontainer.Status) (uint64, uint64, bool) {
 	expectedHash := kubecontainer.HashContainer(container)
 	return expectedHash, containerStatus.Hash, containerStatus.Hash != expectedHash

@@ -525,7 +484,7 @@ func containerSucceeded(c *v1.Container, podStatus *kubecontainer.PodStatus) boo
 func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *kubecontainer.PodStatus) podActions {
 	klog.V(5).InfoS("Syncing Pod", "pod", klog.KObj(pod))

-	createPodSandbox, attempt, sandboxID := m.podSandboxChanged(pod, podStatus)
+	createPodSandbox, attempt, sandboxID := runtimeutil.PodSandboxChanged(pod, podStatus)
 	changes := podActions{
 		KillPod:       createPodSandbox,
 		CreateSandbox: createPodSandbox,
@@ -29,6 +29,7 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/features"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
 	"k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"

@@ -194,7 +195,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (
 	if sc.RunAsGroup != nil && runtime.GOOS != "windows" {
 		lc.SecurityContext.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*sc.RunAsGroup)}
 	}
-	lc.SecurityContext.NamespaceOptions = namespacesForPod(pod)
+	lc.SecurityContext.NamespaceOptions = runtimeutil.NamespacesForPod(pod)

 	if sc.FSGroup != nil && runtime.GOOS != "windows" {
 		lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(*sc.FSGroup))
@@ -19,6 +19,7 @@ package kuberuntime
 import (
 	v1 "k8s.io/api/core/v1"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
 	"k8s.io/kubernetes/pkg/security/apparmor"
 	"k8s.io/kubernetes/pkg/securitycontext"
 )

@@ -52,7 +53,7 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po
 	}

 	// set namespace options and supplemental groups.
-	synthesized.NamespaceOptions = namespacesForPod(pod)
+	synthesized.NamespaceOptions = runtimeutil.NamespacesForPod(pod)
 	podSc := pod.Spec.SecurityContext
 	if podSc != nil {
 		if podSc.FSGroup != nil {
pkg/kubelet/kuberuntime/util/util.go (new file, 108 lines)
@@ -0,0 +1,108 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	v1 "k8s.io/api/core/v1"
+	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	"k8s.io/klog/v2"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+)
+
+// PodSandboxChanged checks whether the spec of the pod is changed and returns
+// (changed, new attempt, original sandboxID if exist).
+func PodSandboxChanged(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, uint32, string) {
+	if len(podStatus.SandboxStatuses) == 0 {
+		klog.V(2).InfoS("No sandbox for pod can be found. Need to start a new one", "pod", klog.KObj(pod))
+		return true, 0, ""
+	}
+
+	readySandboxCount := 0
+	for _, s := range podStatus.SandboxStatuses {
+		if s.State == runtimeapi.PodSandboxState_SANDBOX_READY {
+			readySandboxCount++
+		}
+	}
+
+	// Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one.
+	sandboxStatus := podStatus.SandboxStatuses[0]
+	if readySandboxCount > 1 {
+		klog.V(2).InfoS("Multiple sandboxes are ready for Pod. Need to reconcile them", "pod", klog.KObj(pod))
+		return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
+	}
+	if sandboxStatus.State != runtimeapi.PodSandboxState_SANDBOX_READY {
+		klog.V(2).InfoS("No ready sandbox for pod can be found. Need to start a new one", "pod", klog.KObj(pod))
+		return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
+	}
+
+	// Needs to create a new sandbox when network namespace changed.
+	if sandboxStatus.GetLinux().GetNamespaces().GetOptions().GetNetwork() != NetworkNamespaceForPod(pod) {
+		klog.V(2).InfoS("Sandbox for pod has changed. Need to start a new one", "pod", klog.KObj(pod))
+		return true, sandboxStatus.Metadata.Attempt + 1, ""
+	}
+
+	// Needs to create a new sandbox when the sandbox does not have an IP address.
+	if !kubecontainer.IsHostNetworkPod(pod) && sandboxStatus.Network != nil && sandboxStatus.Network.Ip == "" {
+		klog.V(2).InfoS("Sandbox for pod has no IP address. Need to start a new one", "pod", klog.KObj(pod))
+		return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
+	}
+
+	return false, sandboxStatus.Metadata.Attempt, sandboxStatus.Id
+}
+
+// IpcNamespaceForPod returns the runtimeapi.NamespaceMode
+// for the IPC namespace of a pod
+func IpcNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode {
+	if pod != nil && pod.Spec.HostIPC {
+		return runtimeapi.NamespaceMode_NODE
+	}
+	return runtimeapi.NamespaceMode_POD
+}
+
+// NetworkNamespaceForPod returns the runtimeapi.NamespaceMode
+// for the network namespace of a pod
+func NetworkNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode {
+	if pod != nil && pod.Spec.HostNetwork {
+		return runtimeapi.NamespaceMode_NODE
+	}
+	return runtimeapi.NamespaceMode_POD
+}
+
+// PidNamespaceForPod returns the runtimeapi.NamespaceMode
+// for the PID namespace of a pod
+func PidNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode {
+	if pod != nil {
+		if pod.Spec.HostPID {
+			return runtimeapi.NamespaceMode_NODE
+		}
+		if pod.Spec.ShareProcessNamespace != nil && *pod.Spec.ShareProcessNamespace {
+			return runtimeapi.NamespaceMode_POD
+		}
+	}
+	// Note that PID does not default to the zero value for v1.Pod
+	return runtimeapi.NamespaceMode_CONTAINER
+}
+
+// NamespacesForPod returns the runtimeapi.NamespaceOption for a given pod.
+// An empty or nil pod can be used to get the namespace defaults for v1.Pod.
+func NamespacesForPod(pod *v1.Pod) *runtimeapi.NamespaceOption {
+	return &runtimeapi.NamespaceOption{
+		Ipc:     IpcNamespaceForPod(pod),
+		Network: NetworkNamespaceForPod(pod),
+		Pid:     PidNamespaceForPod(pod),
+	}
+}
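A quick usage sketch of the relocated helpers (hypothetical caller and inputs; the full matrix of outcomes is exercised by the new unit tests below):

    import (
        v1 "k8s.io/api/core/v1"
        kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
        runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
    )

    func sandboxDecision() {
        pod := &v1.Pod{}                     // no host namespaces requested
        status := &kubecontainer.PodStatus{} // no sandbox recorded yet

        changed, attempt, sandboxID := runtimeutil.PodSandboxChanged(pod, status)
        _, _, _ = changed, attempt, sandboxID // true, 0, "": a first sandbox must be created

        opts := runtimeutil.NamespacesForPod(pod)
        _ = opts // Ipc: POD, Network: POD, Pid: CONTAINER (the v1.Pod defaults)
    }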
pkg/kubelet/kuberuntime/util/util_test.go (new file, 229 lines)
@@ -0,0 +1,229 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	v1 "k8s.io/api/core/v1"
+	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+)
+
+func TestPodSandboxChanged(t *testing.T) {
+	for desc, test := range map[string]struct {
+		pod               *v1.Pod
+		status            *kubecontainer.PodStatus
+		expectedChanged   bool
+		expectedAttempt   uint32
+		expectedSandboxID string
+	}{
+		"Pod with no existing sandboxes": {
+			pod:               &v1.Pod{},
+			status:            &kubecontainer.PodStatus{},
+			expectedChanged:   true,
+			expectedAttempt:   0,
+			expectedSandboxID: "",
+		},
+		"Pod with multiple ready sandbox statuses": {
+			pod: &v1.Pod{},
+			status: &kubecontainer.PodStatus{
+				SandboxStatuses: []*runtimeapi.PodSandboxStatus{
+					{
+						Id:       "sandboxID2",
+						Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(1)},
+						State:    runtimeapi.PodSandboxState_SANDBOX_READY,
+					},
+					{
+						Id:       "sandboxID1",
+						Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(0)},
+						State:    runtimeapi.PodSandboxState_SANDBOX_READY,
+					},
+				},
+			},
+			expectedChanged:   true,
+			expectedAttempt:   2,
+			expectedSandboxID: "sandboxID2",
+		},
+		"Pod with no ready sandbox statuses": {
+			pod: &v1.Pod{},
+			status: &kubecontainer.PodStatus{
+				SandboxStatuses: []*runtimeapi.PodSandboxStatus{
+					{
+						Id:       "sandboxID2",
+						Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(1)},
+						State:    runtimeapi.PodSandboxState_SANDBOX_NOTREADY,
+					},
+					{
+						Id:       "sandboxID1",
+						Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(0)},
+						State:    runtimeapi.PodSandboxState_SANDBOX_NOTREADY,
+					},
+				},
+			},
+			expectedChanged:   true,
+			expectedAttempt:   2,
+			expectedSandboxID: "sandboxID2",
+		},
+		"Pod with ready sandbox status but network namespace mismatch": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					HostNetwork: true,
+				},
+			},
+			status: &kubecontainer.PodStatus{
+				SandboxStatuses: []*runtimeapi.PodSandboxStatus{
+					{
+						Id: "sandboxID1",
+						Linux: &runtimeapi.LinuxPodSandboxStatus{
+							Namespaces: &runtimeapi.Namespace{
+								Options: &runtimeapi.NamespaceOption{
+									Network: runtimeapi.NamespaceMode_POD,
+								},
+							},
+						},
+						Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(0)},
+						State:    runtimeapi.PodSandboxState_SANDBOX_READY,
+					},
+				},
+			},
+			expectedChanged:   true,
+			expectedAttempt:   1,
+			expectedSandboxID: "",
+		},
+		"Pod with ready sandbox status but no IP": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					HostNetwork: false,
+				},
+			},
+			status: &kubecontainer.PodStatus{
+				SandboxStatuses: []*runtimeapi.PodSandboxStatus{
+					{
+						Id: "sandboxID1",
+						Network: &runtimeapi.PodSandboxNetworkStatus{
+							Ip: "",
+						},
+						Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(0)},
+						State:    runtimeapi.PodSandboxState_SANDBOX_READY,
+					},
+				},
+			},
+			expectedChanged:   true,
+			expectedAttempt:   1,
+			expectedSandboxID: "sandboxID1",
+		},
+		"Pod with ready sandbox status with IP": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					HostNetwork: false,
+				},
+			},
+			status: &kubecontainer.PodStatus{
+				SandboxStatuses: []*runtimeapi.PodSandboxStatus{
+					{
+						Id: "sandboxID1",
+						Network: &runtimeapi.PodSandboxNetworkStatus{
+							Ip: "10.0.0.10",
+						},
+						Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(0)},
+						State:    runtimeapi.PodSandboxState_SANDBOX_READY,
+					},
+				},
+			},
+			expectedChanged:   false,
+			expectedAttempt:   0,
+			expectedSandboxID: "sandboxID1",
+		},
+	} {
+		t.Run(desc, func(t *testing.T) {
+			changed, attempt, id := PodSandboxChanged(test.pod, test.status)
+			require.Equal(t, test.expectedChanged, changed)
+			require.Equal(t, test.expectedAttempt, attempt)
+			require.Equal(t, test.expectedSandboxID, id)
+		})
+	}
+}
+
+func TestNamespacesForPod(t *testing.T) {
+	for desc, test := range map[string]struct {
+		input    *v1.Pod
+		expected *runtimeapi.NamespaceOption
+	}{
+		"nil pod -> default v1 namespaces": {
+			input: nil,
+			expected: &runtimeapi.NamespaceOption{
+				Ipc:     runtimeapi.NamespaceMode_POD,
+				Network: runtimeapi.NamespaceMode_POD,
+				Pid:     runtimeapi.NamespaceMode_CONTAINER,
+			},
+		},
+		"v1.Pod default namespaces": {
+			input: &v1.Pod{},
+			expected: &runtimeapi.NamespaceOption{
+				Ipc:     runtimeapi.NamespaceMode_POD,
+				Network: runtimeapi.NamespaceMode_POD,
+				Pid:     runtimeapi.NamespaceMode_CONTAINER,
+			},
+		},
+		"Host Namespaces": {
+			input: &v1.Pod{
+				Spec: v1.PodSpec{
+					HostIPC:     true,
+					HostNetwork: true,
+					HostPID:     true,
+				},
+			},
+			expected: &runtimeapi.NamespaceOption{
+				Ipc:     runtimeapi.NamespaceMode_NODE,
+				Network: runtimeapi.NamespaceMode_NODE,
+				Pid:     runtimeapi.NamespaceMode_NODE,
+			},
+		},
+		"Shared Process Namespace (feature enabled)": {
+			input: &v1.Pod{
+				Spec: v1.PodSpec{
+					ShareProcessNamespace: &[]bool{true}[0],
+				},
+			},
+			expected: &runtimeapi.NamespaceOption{
+				Ipc:     runtimeapi.NamespaceMode_POD,
+				Network: runtimeapi.NamespaceMode_POD,
+				Pid:     runtimeapi.NamespaceMode_POD,
+			},
+		},
+		"Shared Process Namespace, redundant flag (feature enabled)": {
+			input: &v1.Pod{
+				Spec: v1.PodSpec{
+					ShareProcessNamespace: &[]bool{false}[0],
+				},
+			},
+			expected: &runtimeapi.NamespaceOption{
+				Ipc:     runtimeapi.NamespaceMode_POD,
+				Network: runtimeapi.NamespaceMode_POD,
+				Pid:     runtimeapi.NamespaceMode_CONTAINER,
+			},
+		},
+	} {
+		t.Run(desc, func(t *testing.T) {
+			actual := NamespacesForPod(test.input)
+			require.Equal(t, test.expected, actual)
+		})
+	}
+}
@@ -22,6 +22,9 @@ import (

 	v1 "k8s.io/api/core/v1"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )

 const (

@@ -195,6 +198,24 @@ func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.Co
 	}
 }

+func GeneratePodHasNetworkCondition(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodCondition {
+	newSandboxNeeded, _, _ := runtimeutil.PodSandboxChanged(pod, podStatus)
+	// if a new sandbox does not need to be created for a pod, it indicates that
+	// a sandbox for the pod with networking configured already exists.
+	// Otherwise, the kubelet needs to invoke the container runtime to create a
+	// fresh sandbox and configure networking for the sandbox.
+	if !newSandboxNeeded {
+		return v1.PodCondition{
+			Type:   kubetypes.PodHasNetwork,
+			Status: v1.ConditionTrue,
+		}
+	}
+	return v1.PodCondition{
+		Type:   kubetypes.PodHasNetwork,
+		Status: v1.ConditionFalse,
+	}
+}
+
 func generateContainersReadyConditionForTerminalPhase(podPhase v1.PodPhase) v1.PodCondition {
 	condition := v1.PodCondition{
 		Type: v1.ContainersReady,
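GeneratePodHasNetworkCondition reduces to a single predicate: the condition is True exactly when no new sandbox is needed. An equivalent, more compact formulation (a sketch for illustration, not the code merged above):

    func generatePodHasNetworkCondition(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodCondition {
        newSandboxNeeded, _, _ := runtimeutil.PodSandboxChanged(pod, podStatus)
        status := v1.ConditionTrue
        if newSandboxNeeded {
            // A fresh sandbox (and its network setup) is still pending.
            status = v1.ConditionFalse
        }
        return v1.PodCondition{Type: kubetypes.PodHasNetwork, Status: status}
    }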
@@ -21,7 +21,12 @@ import (
 	"testing"

 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"

 	"k8s.io/api/core/v1"
+	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )

 func TestGenerateContainersReadyCondition(t *testing.T) {

@@ -417,6 +422,77 @@ func TestGeneratePodInitializedCondition(t *testing.T) {
 	}
 }

+func TestGeneratePodHasNetworkCondition(t *testing.T) {
+	for desc, test := range map[string]struct {
+		pod      *v1.Pod
+		status   *kubecontainer.PodStatus
+		expected v1.PodCondition
+	}{
+		"Empty pod status": {
+			pod:    &v1.Pod{},
+			status: &kubecontainer.PodStatus{},
+			expected: v1.PodCondition{
+				Status: v1.ConditionFalse,
+			},
+		},
+		"Pod sandbox status not ready": {
+			pod: &v1.Pod{},
+			status: &kubecontainer.PodStatus{
+				SandboxStatuses: []*runtimeapi.PodSandboxStatus{
+					{
+						Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(0)},
+						State:    runtimeapi.PodSandboxState_SANDBOX_NOTREADY,
+					},
+				},
+			},
+			expected: v1.PodCondition{
+				Status: v1.ConditionFalse,
+			},
+		},
+		"Pod sandbox status ready but no IP configured": {
+			pod: &v1.Pod{},
+			status: &kubecontainer.PodStatus{
+				SandboxStatuses: []*runtimeapi.PodSandboxStatus{
+					{
+						Network: &runtimeapi.PodSandboxNetworkStatus{
+							Ip: "",
+						},
+						Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(0)},
+						State:    runtimeapi.PodSandboxState_SANDBOX_READY,
+					},
+				},
+			},
+			expected: v1.PodCondition{
+				Status: v1.ConditionFalse,
+			},
+		},
+		"Pod sandbox status ready and IP configured": {
+			pod: &v1.Pod{},
+			status: &kubecontainer.PodStatus{
+				SandboxStatuses: []*runtimeapi.PodSandboxStatus{
+					{
+						Network: &runtimeapi.PodSandboxNetworkStatus{
+							Ip: "10.0.0.10",
+						},
+						Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(0)},
+						State:    runtimeapi.PodSandboxState_SANDBOX_READY,
+					},
+				},
+			},
+			expected: v1.PodCondition{
+				Status: v1.ConditionTrue,
+			},
+		},
+	} {
+		t.Run(desc, func(t *testing.T) {
+			test.expected.Type = kubetypes.PodHasNetwork
+			condition := GeneratePodHasNetworkCondition(test.pod, test.status)
+			require.Equal(t, test.expected.Type, condition.Type)
+			require.Equal(t, test.expected.Status, condition.Status)
+		})
+	}
+}
+
 func getPodCondition(conditionType v1.PodConditionType, status v1.ConditionStatus, reason, message string) v1.PodCondition {
 	return v1.PodCondition{
 		Type: conditionType,
@@ -483,6 +483,9 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp
 	// Set InitializedCondition.LastTransitionTime.
 	updateLastTransitionTime(&status, &oldStatus, v1.PodInitialized)

+	// Set PodHasNetwork.LastTransitionTime.
+	updateLastTransitionTime(&status, &oldStatus, kubetypes.PodHasNetwork)
+
 	// Set PodScheduledCondition.LastTransitionTime.
 	updateLastTransitionTime(&status, &oldStatus, v1.PodScheduled)
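updateLastTransitionTime is a pre-existing helper in the status manager; the hunk above simply extends it to the new condition. A simplified sketch of its behavior, assuming it treats PodHasNetwork like the other kubelet-owned conditions (the timestamp is refreshed only when the condition's status actually flips):

    func updateLastTransitionTime(status, oldStatus *v1.PodStatus, conditionType v1.PodConditionType) {
        _, condition := podutil.GetPodCondition(status, conditionType)
        if condition == nil {
            return
        }
        // Preserve the previous timestamp unless the condition's status changed.
        lastTransitionTime := metav1.Now()
        _, oldCondition := podutil.GetPodCondition(oldStatus, conditionType)
        if oldCondition != nil && condition.Status == oldCondition.Status {
            lastTransitionTime = oldCondition.LastTransitionTime
        }
        condition.LastTransitionTime = lastTransitionTime
    }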
@@ -43,3 +43,13 @@ const (
 	LimitedSwap   = "LimitedSwap"
 	UnlimitedSwap = "UnlimitedSwap"
 )
+
+// Alpha conditions managed by Kubelet that are not yet part of the API. The
+// entries here should be moved to staging/src/k8s.io/api/core/v1/types.go
+// once the feature managing the condition graduates to Beta.
+const (
+	// PodHasNetwork indicates networking has been configured successfully for the
+	// pod and IP address(es) assigned. Images for containers specified in the pod
+	// spec can be pulled and containers launched after this condition is true.
+	PodHasNetwork = "PodHasNetwork"
+)
@@ -18,6 +18,8 @@ package types

 import (
 	v1 "k8s.io/api/core/v1"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/kubernetes/pkg/features"
 )

 // PodConditionsByKubelet is the list of pod conditions owned by kubelet

@@ -35,5 +37,10 @@ func PodConditionByKubelet(conditionType v1.PodConditionType) bool {
 			return true
 		}
 	}
+	if utilfeature.DefaultFeatureGate.Enabled(features.PodHasNetworkCondition) {
+		if conditionType == PodHasNetwork {
+			return true
+		}
+	}
 	return false
 }
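Callers such as the status manager use PodConditionByKubelet to decide whether a condition arriving in a status update is owned by the kubelet. A usage sketch (hypothetical values):

    // True only while the PodHasNetworkCondition gate is enabled.
    owned := kubetypes.PodConditionByKubelet(v1.PodConditionType("PodHasNetwork"))

    // Always true for the API-defined kubelet-owned conditions.
    _ = kubetypes.PodConditionByKubelet(v1.PodReady)
    _ = owned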
@@ -20,14 +20,19 @@ import (
 	"testing"

 	v1 "k8s.io/api/core/v1"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	"k8s.io/kubernetes/pkg/features"
 )

 func TestPodConditionByKubelet(t *testing.T) {
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodHasNetworkCondition, true)()
 	trueCases := []v1.PodConditionType{
 		v1.PodScheduled,
 		v1.PodReady,
 		v1.PodInitialized,
 		v1.ContainersReady,
+		PodHasNetwork,
 	}

 	for _, tc := range trueCases {
test/e2e_node/pod_conditions_test.go (new file, 228 lines)
@@ -0,0 +1,228 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2enode
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	admissionapi "k8s.io/pod-security-admission/api"
+
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	"k8s.io/kubernetes/pkg/kubelet/events"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
+	"k8s.io/kubernetes/test/e2e/framework"
+	e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	testutils "k8s.io/kubernetes/test/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"
+
+	"k8s.io/kubernetes/pkg/features"
+	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
+
+	"github.com/onsi/ginkgo/v2"
+)
+
+var _ = SIGDescribe("Pod conditions managed by Kubelet", func() {
+	f := framework.NewDefaultFramework("pod-conditions")
+	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
+
+	ginkgo.Context("including PodHasNetwork condition [Serial] [Feature:PodHasNetwork]", func() {
+		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
+			initialConfig.FeatureGates = map[string]bool{
+				string(features.PodHasNetworkCondition): true,
+			}
+		})
+		ginkgo.It("a pod without init containers should report all conditions set in expected order after the pod is up", runPodReadyConditionsTest(f, false, true))
+		ginkgo.It("a pod with init containers should report all conditions set in expected order after the pod is up", runPodReadyConditionsTest(f, true, true))
+		ginkgo.It("a pod failing to mount volumes and without init containers should report scheduled and initialized conditions set", runPodFailingConditionsTest(f, false, true))
+		ginkgo.It("a pod failing to mount volumes and with init containers should report just the scheduled condition set", runPodFailingConditionsTest(f, true, true))
+	})
+
+	ginkgo.Context("without PodHasNetwork condition", func() {
+		ginkgo.It("a pod without init containers should report all conditions set in expected order after the pod is up", runPodReadyConditionsTest(f, false, false))
+		ginkgo.It("a pod with init containers should report all conditions set in expected order after the pod is up", runPodReadyConditionsTest(f, true, false))
+		ginkgo.It("a pod failing to mount volumes and without init containers should report scheduled and initialized conditions set", runPodFailingConditionsTest(f, false, false))
+		ginkgo.It("a pod failing to mount volumes and with init containers should report just the scheduled condition set", runPodFailingConditionsTest(f, true, false))
+	})
+})
+
+func runPodFailingConditionsTest(f *framework.Framework, hasInitContainers, checkPodHasNetwork bool) func() {
+	return func() {
+		ginkgo.By("creating a pod whose sandbox creation is blocked due to a missing volume")
+
+		p := webserverPodSpec("pod-"+string(uuid.NewUUID()), "web1", "init1", hasInitContainers)
+		p.Spec.Volumes = []v1.Volume{
+			{
+				Name: "cm",
+				VolumeSource: v1.VolumeSource{
+					ConfigMap: &v1.ConfigMapVolumeSource{
+						LocalObjectReference: v1.LocalObjectReference{Name: "does-not-exist"},
+					},
+				},
+			},
+		}
+		p.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
+			{
+				Name:      "cm",
+				MountPath: "/config",
+			},
+		}
+
+		p = f.PodClient().Create(p)
+
+		ginkgo.By("waiting until kubelet has started trying to set up the pod and started to fail")
+
+		eventSelector := fields.Set{
+			"involvedObject.kind":      "Pod",
+			"involvedObject.name":      p.Name,
+			"involvedObject.namespace": f.Namespace.Name,
+			"reason":                   events.FailedMountVolume,
+		}.AsSelector().String()
+		e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, "MountVolume.SetUp failed for volume", framework.PodEventTimeout)
+
+		p, err := f.PodClient().Get(context.TODO(), p.Name, metav1.GetOptions{})
+		framework.ExpectNoError(err)
+
+		ginkgo.By("checking pod condition for a pod whose sandbox creation is blocked")
+
+		scheduledTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.PodScheduled, true)
+		framework.ExpectNoError(err)
+
+		// Verify PodHasNetwork is not set (since sandbox creation is blocked)
+		if checkPodHasNetwork {
+			_, err := getTransitionTimeForPodConditionWithStatus(p, kubetypes.PodHasNetwork, false)
+			framework.ExpectNoError(err)
+		}
+
+		if hasInitContainers {
+			// Verify PodInitialized is not set if init containers are present (since sandbox creation is blocked)
+			_, err := getTransitionTimeForPodConditionWithStatus(p, v1.PodInitialized, false)
+			framework.ExpectNoError(err)
+		} else {
+			// Verify PodInitialized is set if init containers are not present (since without init containers, it gets set very early)
+			initializedTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.PodInitialized, true)
+			framework.ExpectNoError(err)
+			framework.ExpectNotEqual(initializedTime.Before(scheduledTime), true, fmt.Sprintf("pod without init containers is initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime))
+		}
+
+		// Verify ContainersReady is not set (since sandbox creation is blocked)
+		_, err = getTransitionTimeForPodConditionWithStatus(p, v1.ContainersReady, false)
+		framework.ExpectNoError(err)
+		// Verify PodReady is not set (since sandbox creation is blocked)
+		_, err = getTransitionTimeForPodConditionWithStatus(p, v1.PodReady, false)
+		framework.ExpectNoError(err)
+	}
+}
+
+func runPodReadyConditionsTest(f *framework.Framework, hasInitContainers, checkPodHasNetwork bool) func() {
+	return func() {
+		ginkgo.By("creating a pod that successfully comes up in a ready/running state")
+
+		p := f.PodClient().Create(webserverPodSpec("pod-"+string(uuid.NewUUID()), "web1", "init1", hasInitContainers))
+		e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout)
+
+		p, err := f.PodClient().Get(context.TODO(), p.Name, metav1.GetOptions{})
+		framework.ExpectNoError(err)
+		isReady, err := testutils.PodRunningReady(p)
+		framework.ExpectNoError(err)
+		framework.ExpectEqual(isReady, true, "pod should be ready")
+
+		ginkgo.By("checking order of pod condition transitions for a pod with no container/sandbox restarts")
+
+		scheduledTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.PodScheduled, true)
+		framework.ExpectNoError(err)
+		initializedTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.PodInitialized, true)
+		framework.ExpectNoError(err)
+
+		condBeforeContainersReadyTransitionTime := initializedTime
+		errSubstrIfContainersReadyTooEarly := "is initialized"
+		if checkPodHasNetwork {
+			hasNetworkTime, err := getTransitionTimeForPodConditionWithStatus(p, kubetypes.PodHasNetwork, true)
+			framework.ExpectNoError(err)
+
+			if hasInitContainers {
+				// With init containers, verify the sequence of conditions is: Scheduled => HasNetwork => Initialized
+				framework.ExpectNotEqual(hasNetworkTime.Before(scheduledTime), true, fmt.Sprintf("pod with init containers has network at: %v which is before pod scheduled at: %v", hasNetworkTime, scheduledTime))
+				framework.ExpectNotEqual(initializedTime.Before(hasNetworkTime), true, fmt.Sprintf("pod with init containers is initialized at: %v which is before pod has network at: %v", initializedTime, hasNetworkTime))
+			} else {
+				// Without init containers, verify the sequence of conditions is: Scheduled => Initialized => HasNetwork
+				condBeforeContainersReadyTransitionTime = hasNetworkTime
+				errSubstrIfContainersReadyTooEarly = "has network"
+				framework.ExpectNotEqual(initializedTime.Before(scheduledTime), true, fmt.Sprintf("pod without init containers initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime))
+				framework.ExpectNotEqual(hasNetworkTime.Before(initializedTime), true, fmt.Sprintf("pod without init containers has network at: %v which is before pod is initialized at: %v", hasNetworkTime, initializedTime))
+			}
+		} else {
+			// With the PodHasNetwork feature disabled, verify the sequence of conditions is: Scheduled => Initialized
+			framework.ExpectNotEqual(initializedTime.Before(scheduledTime), true, fmt.Sprintf("pod initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime))
+		}
+		// Verify the next condition to get set is ContainersReady
+		containersReadyTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.ContainersReady, true)
+		framework.ExpectNoError(err)
+		framework.ExpectNotEqual(containersReadyTime.Before(condBeforeContainersReadyTransitionTime), true, fmt.Sprintf("containers ready at: %v which is before pod %s: %v", containersReadyTime, errSubstrIfContainersReadyTooEarly, condBeforeContainersReadyTransitionTime))
+
+		// Verify ContainersReady => PodReady
+		podReadyTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.PodReady, true)
+		framework.ExpectNoError(err)
+		framework.ExpectNotEqual(podReadyTime.Before(containersReadyTime), true, fmt.Sprintf("pod ready at: %v which is before pod containers ready at: %v", podReadyTime, containersReadyTime))
+	}
+}
+
+func getTransitionTimeForPodConditionWithStatus(pod *v1.Pod, condType v1.PodConditionType, expectedStatus bool) (time.Time, error) {
+	for _, cond := range pod.Status.Conditions {
+		if cond.Type == condType {
+			if strings.EqualFold(string(cond.Status), strconv.FormatBool(expectedStatus)) {
+				return cond.LastTransitionTime.Time, nil
+			}
+			return time.Time{}, fmt.Errorf("condition: %s found for pod but status: %s did not match expected status: %s", condType, cond.Status, strconv.FormatBool(expectedStatus))
+		}
+	}
+	return time.Time{}, fmt.Errorf("condition: %s not found for pod", condType)
+}
+
+func webserverPodSpec(podName, containerName, initContainerName string, addInitContainer bool) *v1.Pod {
+	p := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: podName,
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:  containerName,
+					Image: imageutils.GetE2EImage(imageutils.Agnhost),
+					Args:  []string{"test-webserver"},
+				},
+			},
+		},
+	}
+	if addInitContainer {
+		p.Spec.InitContainers = []v1.Container{
+			{
+				Name:    initContainerName,
+				Image:   imageutils.GetE2EImage(imageutils.BusyBox),
+				Command: []string{"sh", "-c", "sleep 5s"},
+			},
+		}
+	}
+	return p
+}