Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 21:47:07 +00:00)

Merge pull request #125333 from ardaguclu/kep-4292-beta
KEP-4292: Preparations to promote custom profiling in kubectl debug

Commit: c20aa764d7
@@ -56,6 +56,7 @@ import (
 	"k8s.io/kubectl/pkg/util/templates"
 	"k8s.io/kubectl/pkg/util/term"
 	"k8s.io/utils/ptr"
+	"sigs.k8s.io/yaml"
 )
 
 var (
@@ -211,8 +212,8 @@ func (o *DebugOptions) AddFlags(cmd *cobra.Command) {
 	cmd.Flags().StringVar(&o.TargetContainer, "target", "", i18n.T("When using an ephemeral container, target processes in this container name."))
 	cmd.Flags().BoolVarP(&o.TTY, "tty", "t", o.TTY, i18n.T("Allocate a TTY for the debugging container."))
 	cmd.Flags().StringVar(&o.Profile, "profile", ProfileLegacy, i18n.T(`Options are "legacy", "general", "baseline", "netadmin", "restricted" or "sysadmin".`))
-	if cmdutil.DebugCustomProfile.IsEnabled() {
-		cmd.Flags().StringVar(&o.CustomProfileFile, "custom", o.CustomProfileFile, i18n.T("Path to a JSON file containing a partial container spec to customize built-in debug profiles."))
+	if !cmdutil.DebugCustomProfile.IsDisabled() {
+		cmd.Flags().StringVar(&o.CustomProfileFile, "custom", o.CustomProfileFile, i18n.T("Path to a JSON or YAML file containing a partial container spec to customize built-in debug profiles."))
 	}
 }
 
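The gating change above is the core of the beta promotion: an alpha-style gate registers the --custom flag only on explicit opt-in (IsEnabled), while a beta-style gate registers it unless the user explicitly opts out (!IsDisabled). It is also why the test hunks further down drop the cmdtesting.WithAlphaEnvs wrapper: with the gate on by default, no opt-in environment is needed. A minimal sketch of the opt-in versus opt-out pattern follows; the envGate type, its method bodies, and the environment-variable convention are illustrative assumptions, not kubectl's actual feature-gate implementation.

package main

import (
	"fmt"
	"os"
)

// envGate is a toy stand-in for a client-side feature gate driven by an
// environment variable. It is NOT kubectl's implementation.
type envGate struct{ envVar string }

// IsEnabled reports an explicit opt-in: true only when the variable is set
// to "true". Alpha-style behaviour: the feature is off by default.
func (g envGate) IsEnabled() bool { return os.Getenv(g.envVar) == "true" }

// IsDisabled reports an explicit opt-out: true only when the variable is set
// to "false". Beta-style behaviour: the feature is on by default.
func (g envGate) IsDisabled() bool { return os.Getenv(g.envVar) == "false" }

func main() {
	gate := envGate{envVar: "KUBECTL_DEBUG_CUSTOM_PROFILE"} // assumed variable name
	fmt.Println("alpha gating, register --custom:", gate.IsEnabled())  // opt-in
	fmt.Println("beta gating, register --custom:", !gate.IsDisabled()) // opt-out
}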
@@ -293,7 +294,10 @@ func (o *DebugOptions) Complete(restClientGetter genericclioptions.RESTClientGet
 
 		err = json.Unmarshal(customProfileBytes, &o.CustomProfile)
 		if err != nil {
-			return fmt.Errorf("%s does not contain a valid container spec: %w", o.CustomProfileFile, err)
+			err = yaml.Unmarshal(customProfileBytes, &o.CustomProfile)
+			if err != nil {
+				return fmt.Errorf("%s does not contain a valid container spec: %w", o.CustomProfileFile, err)
+			}
 		}
 	}
 
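The Complete() change above keeps the existing JSON decode of the --custom file and adds a YAML fallback when JSON parsing fails. Below is a standalone sketch of the same pattern; the helper name decodePartialContainer is made up for illustration, and the corev1.Container target mirrors what kubectl stores in o.CustomProfile.

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/yaml"
)

// decodePartialContainer accepts a partial container spec in either JSON or
// YAML form: JSON is attempted first, YAML is the fallback.
func decodePartialContainer(data []byte) (*corev1.Container, error) {
	c := &corev1.Container{}
	if err := json.Unmarshal(data, c); err != nil {
		if yerr := yaml.Unmarshal(data, c); yerr != nil {
			return nil, fmt.Errorf("not a valid container spec: %w", yerr)
		}
	}
	return c, nil
}

func main() {
	jsonSpec := []byte(`{"env":[{"name":"ENV_VAR1","value":"value1"}]}`)
	yamlSpec := []byte("env:\n- name: ENV_VAR3\n  value: value3\n")
	for _, in := range [][]byte{jsonSpec, yamlSpec} {
		c, err := decodePartialContainer(in)
		if err != nil {
			panic(err)
		}
		fmt.Println(c.Env) // both inputs decode into the same struct shape
	}
}

Since sigs.k8s.io/yaml converts YAML to JSON before unmarshalling, the fallback alone would also accept JSON input; trying encoding/json first presumably just preserves the pre-existing JSON behaviour and error messages.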
@@ -22,8 +22,6 @@ import (
 	"testing"
 	"time"
 
-	cmdutil "k8s.io/kubectl/pkg/cmd/util"
-
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/spf13/cobra"
@@ -2088,31 +2086,29 @@ func TestGenerateNodeDebugPodCustomProfile(t *testing.T) {
 	} {
 
 		t.Run(tc.name, func(t *testing.T) {
-			cmdtesting.WithAlphaEnvs([]cmdutil.FeatureGate{cmdutil.DebugCustomProfile}, t, func(t *testing.T) {
 			var err error
 			kflags := KeepFlags{
 				Labels:         tc.opts.KeepLabels,
 				Annotations:    tc.opts.KeepAnnotations,
 				Liveness:       tc.opts.KeepLiveness,
 				Readiness:      tc.opts.KeepReadiness,
 				Startup:        tc.opts.KeepStartup,
 				InitContainers: tc.opts.KeepInitContainers,
 			}
 			tc.opts.Applier, err = NewProfileApplier(tc.opts.Profile, kflags)
 			if err != nil {
 				t.Fatalf("Fail to create profile applier: %s: %v", tc.opts.Profile, err)
 			}
 			tc.opts.IOStreams = genericiooptions.NewTestIOStreamsDiscard()
 
 			pod, err := tc.opts.generateNodeDebugPod(tc.node)
 			if err != nil {
 				t.Fatalf("Fail to generate node debug pod: %v", err)
 			}
 			tc.expected.Name = pod.Name
 			if diff := cmp.Diff(tc.expected, pod); diff != "" {
 				t.Error("unexpected diff in generated object: (-want +got):\n", diff)
 			}
-			})
 		})
 	}
 }
@@ -2296,31 +2292,29 @@ func TestGenerateCopyDebugPodCustomProfile(t *testing.T) {
 	} {
 
 		t.Run(tc.name, func(t *testing.T) {
-			cmdtesting.WithAlphaEnvs([]cmdutil.FeatureGate{cmdutil.DebugCustomProfile}, t, func(t *testing.T) {
 			var err error
 			kflags := KeepFlags{
 				Labels:         tc.opts.KeepLabels,
 				Annotations:    tc.opts.KeepAnnotations,
 				Liveness:       tc.opts.KeepLiveness,
 				Readiness:      tc.opts.KeepReadiness,
 				Startup:        tc.opts.KeepStartup,
 				InitContainers: tc.opts.KeepInitContainers,
 			}
 			tc.opts.Applier, err = NewProfileApplier(tc.opts.Profile, kflags)
 			if err != nil {
 				t.Fatalf("Fail to create profile applier: %s: %v", tc.opts.Profile, err)
 			}
 			tc.opts.IOStreams = genericiooptions.NewTestIOStreamsDiscard()
 
 			pod, dc, err := tc.opts.generatePodCopyWithDebugContainer(tc.copyPod)
 			if err != nil {
 				t.Fatalf("Fail to generate node debug pod: %v", err)
 			}
 			tc.expected.Spec.Containers[0].Name = dc
 			if diff := cmp.Diff(tc.expected, pod); diff != "" {
 				t.Error("unexpected diff in generated object: (-want +got):\n", diff)
 			}
-			})
 		})
 	}
 }
@@ -2510,31 +2504,29 @@ func TestGenerateEphemeralDebugPodCustomProfile(t *testing.T) {
 	} {
 
 		t.Run(tc.name, func(t *testing.T) {
-			cmdtesting.WithAlphaEnvs([]cmdutil.FeatureGate{cmdutil.DebugCustomProfile}, t, func(t *testing.T) {
 			var err error
 			kflags := KeepFlags{
 				Labels:         tc.opts.KeepLabels,
 				Annotations:    tc.opts.KeepAnnotations,
 				Liveness:       tc.opts.KeepLiveness,
 				Readiness:      tc.opts.KeepReadiness,
 				Startup:        tc.opts.KeepStartup,
 				InitContainers: tc.opts.KeepInitContainers,
 			}
 			tc.opts.Applier, err = NewProfileApplier(tc.opts.Profile, kflags)
 			if err != nil {
 				t.Fatalf("Fail to create profile applier: %s: %v", tc.opts.Profile, err)
 			}
 			tc.opts.IOStreams = genericiooptions.NewTestIOStreamsDiscard()
 
 			pod, ec, err := tc.opts.generateDebugContainer(tc.copyPod)
 			if err != nil {
 				t.Fatalf("Fail to generate node debug pod: %v", err)
 			}
 			tc.expected.Spec.EphemeralContainers[0].Name = ec.Name
 			if diff := cmp.Diff(tc.expected, pod); diff != "" {
 				t.Error("unexpected diff in generated object: (-want +got):\n", diff)
 			}
-			})
 		})
 	}
 }
@@ -567,3 +567,95 @@ run_kubectl_debug_netadmin_node_tests() {
   set +o nounset
   set +o errexit
 }
+
+run_kubectl_debug_custom_profile_tests() {
+  set -o nounset
+  set -o errexit
+
+  create_and_use_new_namespace
+  kube::log::status "Testing kubectl debug custom profile"
+
+  ### Pod Troubleshooting by ephemeral containers with netadmin profile
+  # Pre-Condition: Pod "nginx" is created
+  kubectl run target-debug "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target-debug:'
+
+  cat > "${TMPDIR:-/tmp}"/custom_profile.json << EOF
+{
+  "env": [
+    {
+      "name": "ENV_VAR1",
+      "value": "value1"
+    },
+    {
+      "name": "ENV_VAR2",
+      "value": "value2"
+    }
+  ]
+}
+EOF
+
+  cat > "${TMPDIR:-/tmp}"/custom_profile.yaml << EOF
+env:
+  - name: ENV_VAR3
+    value: value3
+  - name: ENV_VAR4
+    value: value4
+EOF
+
+  # Command: add a new debug container with general profile
+  output_message=$(kubectl debug target-debug -it --image=busybox --attach=false -c debug-container --profile=general --custom="${TMPDIR:-/tmp}"/custom_profile.json "${kube_flags[@]:?}")
+
+  # Post-Conditions
+  kube::test::get_object_assert pod/target-debug '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 0).env 0)).name}}' 'ENV_VAR1'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 0).env 0)).value}}' 'value1'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 0).env 1)).name}}' 'ENV_VAR2'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 0).env 1)).value}}' 'value2'
+
+  # Command: add a new debug container with general profile
+  kubectl debug target-debug -it --image=busybox --attach=false -c debug-container-2 --profile=general --custom="${TMPDIR:-/tmp}"/custom_profile.yaml "${kube_flags[@]:?}"
+
+  # Post-Conditions
+  kube::test::get_object_assert pod/target-debug '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:debug-container-2:'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 1).env 0)).name}}' 'ENV_VAR3'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 1).env 0)).value}}' 'value3'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 1).env 1)).name}}' 'ENV_VAR4'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 1).env 1)).value}}' 'value4'
+
+  # Command: create a copy of target with a new debug container
+  kubectl debug target-debug -it --copy-to=target-copy --image=busybox --container=debug-container-3 --attach=false --profile=general --custom="${TMPDIR:-/tmp}"/custom_profile.json "${kube_flags[@]:?}"
+  # Post-Conditions
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.name}}:{{end}}' 'target-debug:debug-container-3:'
+  kube::test::get_object_assert pod/target-copy '{{((index (index .spec.containers 1).env 0)).name}}' 'ENV_VAR1'
+  kube::test::get_object_assert pod/target-copy '{{((index (index .spec.containers 1).env 0)).value}}' 'value1'
+  kube::test::get_object_assert pod/target-copy '{{((index (index .spec.containers 1).env 1)).name}}' 'ENV_VAR2'
+  kube::test::get_object_assert pod/target-copy '{{((index (index .spec.containers 1).env 1)).value}}' 'value2'
+
+  # Clean up
+  kubectl delete pod target-copy "${kube_flags[@]:?}"
+  kubectl delete pod target-debug "${kube_flags[@]:?}"
+
+  ### Debug node with custom profile
+  # Pre-Condition: node exists
+  kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
+  # Command: create a new node debugger pod
+  output_message=$(kubectl debug --profile general node/127.0.0.1 --image=busybox --custom="${TMPDIR:-/tmp}"/custom_profile.yaml --attach=false "${kube_flags[@]:?}" -- true)
+  # Post-Conditions
+  kube::test::get_object_assert pod "{{(len .items)}}" '1'
+  debugger=$(kubectl get pod -o go-template="{{(index .items 0)${id_field:?}}}")
+  kube::test::if_has_string "${output_message:?}" "${debugger:?}"
+  kube::test::get_object_assert "pod/${debugger:?}" "{{${image_field:?}}}" 'busybox'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.nodeName}}' '127.0.0.1'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{((index (index .spec.containers 0).env 0)).name}}' 'ENV_VAR3'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{((index (index .spec.containers 0).env 0)).value}}' 'value3'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{((index (index .spec.containers 0).env 1)).name}}' 'ENV_VAR4'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{((index (index .spec.containers 0).env 1)).value}}' 'value4'
+  # Clean up
+  # pod.spec.nodeName is set by kubectl debug node which causes the delete to hang,
+  # presumably waiting for a kubelet that's not present. Force the delete.
+  kubectl delete --force pod "${debugger:?}" "${kube_flags[@]:?}"
+
+  set +o nounset
+  set +o errexit
+}
@@ -1046,6 +1046,7 @@ runTests() {
     record_command run_kubectl_debug_baseline_tests
     record_command run_kubectl_debug_restricted_tests
     record_command run_kubectl_debug_netadmin_tests
+    record_command run_kubectl_debug_custom_profile_tests
   fi
   if kube::test::if_supports_resource "${nodes}" ; then
     record_command run_kubectl_debug_node_tests