diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go b/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go
index 31873fff47f..c10426bac01 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go
@@ -56,6 +56,7 @@ import (
 	"k8s.io/kubectl/pkg/util/templates"
 	"k8s.io/kubectl/pkg/util/term"
 	"k8s.io/utils/ptr"
+	"sigs.k8s.io/yaml"
 )
 
 var (
@@ -211,8 +212,8 @@ func (o *DebugOptions) AddFlags(cmd *cobra.Command) {
 	cmd.Flags().StringVar(&o.TargetContainer, "target", "", i18n.T("When using an ephemeral container, target processes in this container name."))
 	cmd.Flags().BoolVarP(&o.TTY, "tty", "t", o.TTY, i18n.T("Allocate a TTY for the debugging container."))
 	cmd.Flags().StringVar(&o.Profile, "profile", ProfileLegacy, i18n.T(`Options are "legacy", "general", "baseline", "netadmin", "restricted" or "sysadmin".`))
-	if cmdutil.DebugCustomProfile.IsEnabled() {
-		cmd.Flags().StringVar(&o.CustomProfileFile, "custom", o.CustomProfileFile, i18n.T("Path to a JSON file containing a partial container spec to customize built-in debug profiles."))
+	if !cmdutil.DebugCustomProfile.IsDisabled() {
+		cmd.Flags().StringVar(&o.CustomProfileFile, "custom", o.CustomProfileFile, i18n.T("Path to a JSON or YAML file containing a partial container spec to customize built-in debug profiles."))
 	}
 }
 
@@ -293,7 +294,10 @@ func (o *DebugOptions) Complete(restClientGetter genericclioptions.RESTClientGet
 
 		err = json.Unmarshal(customProfileBytes, &o.CustomProfile)
 		if err != nil {
-			return fmt.Errorf("%s does not contain a valid container spec: %w", o.CustomProfileFile, err)
+			err = yaml.Unmarshal(customProfileBytes, &o.CustomProfile)
+			if err != nil {
+				return fmt.Errorf("%s does not contain a valid container spec: %w", o.CustomProfileFile, err)
+			}
 		}
 	}
 
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go
index b9690e6efb1..4fbfbaeada0 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go
@@ -22,8 +22,6 @@ import (
 	"testing"
 	"time"
 
-	cmdutil "k8s.io/kubectl/pkg/cmd/util"
-
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/spf13/cobra"
@@ -2088,31 +2086,29 @@ func TestGenerateNodeDebugPodCustomProfile(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
-			cmdtesting.WithAlphaEnvs([]cmdutil.FeatureGate{cmdutil.DebugCustomProfile}, t, func(t *testing.T) {
-				var err error
-				kflags := KeepFlags{
-					Labels:         tc.opts.KeepLabels,
-					Annotations:    tc.opts.KeepAnnotations,
-					Liveness:       tc.opts.KeepLiveness,
-					Readiness:      tc.opts.KeepReadiness,
-					Startup:        tc.opts.KeepStartup,
-					InitContainers: tc.opts.KeepInitContainers,
-				}
-				tc.opts.Applier, err = NewProfileApplier(tc.opts.Profile, kflags)
-				if err != nil {
-					t.Fatalf("Fail to create profile applier: %s: %v", tc.opts.Profile, err)
-				}
-				tc.opts.IOStreams = genericiooptions.NewTestIOStreamsDiscard()
+			var err error
+			kflags := KeepFlags{
+				Labels:         tc.opts.KeepLabels,
+				Annotations:    tc.opts.KeepAnnotations,
+				Liveness:       tc.opts.KeepLiveness,
+				Readiness:      tc.opts.KeepReadiness,
+				Startup:        tc.opts.KeepStartup,
+				InitContainers: tc.opts.KeepInitContainers,
+			}
+			tc.opts.Applier, err = NewProfileApplier(tc.opts.Profile, kflags)
+			if err != nil {
+				t.Fatalf("Fail to create profile applier: %s: %v", tc.opts.Profile, err)
+			}
+			tc.opts.IOStreams = genericiooptions.NewTestIOStreamsDiscard()
 
-				pod, err := tc.opts.generateNodeDebugPod(tc.node)
-				if err != nil {
-					t.Fatalf("Fail to generate node debug pod: %v", err)
-				}
-				tc.expected.Name = pod.Name
-				if diff := cmp.Diff(tc.expected, pod); diff != "" {
-					t.Error("unexpected diff in generated object: (-want +got):\n", diff)
-				}
-			})
+			pod, err := tc.opts.generateNodeDebugPod(tc.node)
+			if err != nil {
+				t.Fatalf("Fail to generate node debug pod: %v", err)
+			}
+			tc.expected.Name = pod.Name
+			if diff := cmp.Diff(tc.expected, pod); diff != "" {
+				t.Error("unexpected diff in generated object: (-want +got):\n", diff)
+			}
 		})
 	}
 }
 
@@ -2296,31 +2292,29 @@ func TestGenerateCopyDebugPodCustomProfile(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
-			cmdtesting.WithAlphaEnvs([]cmdutil.FeatureGate{cmdutil.DebugCustomProfile}, t, func(t *testing.T) {
-				var err error
-				kflags := KeepFlags{
-					Labels:         tc.opts.KeepLabels,
-					Annotations:    tc.opts.KeepAnnotations,
-					Liveness:       tc.opts.KeepLiveness,
-					Readiness:      tc.opts.KeepReadiness,
-					Startup:        tc.opts.KeepStartup,
-					InitContainers: tc.opts.KeepInitContainers,
-				}
-				tc.opts.Applier, err = NewProfileApplier(tc.opts.Profile, kflags)
-				if err != nil {
-					t.Fatalf("Fail to create profile applier: %s: %v", tc.opts.Profile, err)
-				}
-				tc.opts.IOStreams = genericiooptions.NewTestIOStreamsDiscard()
+			var err error
+			kflags := KeepFlags{
+				Labels:         tc.opts.KeepLabels,
+				Annotations:    tc.opts.KeepAnnotations,
+				Liveness:       tc.opts.KeepLiveness,
+				Readiness:      tc.opts.KeepReadiness,
+				Startup:        tc.opts.KeepStartup,
+				InitContainers: tc.opts.KeepInitContainers,
+			}
+			tc.opts.Applier, err = NewProfileApplier(tc.opts.Profile, kflags)
+			if err != nil {
+				t.Fatalf("Fail to create profile applier: %s: %v", tc.opts.Profile, err)
+			}
+			tc.opts.IOStreams = genericiooptions.NewTestIOStreamsDiscard()
 
-				pod, dc, err := tc.opts.generatePodCopyWithDebugContainer(tc.copyPod)
-				if err != nil {
-					t.Fatalf("Fail to generate node debug pod: %v", err)
-				}
-				tc.expected.Spec.Containers[0].Name = dc
-				if diff := cmp.Diff(tc.expected, pod); diff != "" {
-					t.Error("unexpected diff in generated object: (-want +got):\n", diff)
-				}
-			})
+			pod, dc, err := tc.opts.generatePodCopyWithDebugContainer(tc.copyPod)
+			if err != nil {
+				t.Fatalf("Fail to generate node debug pod: %v", err)
+			}
+			tc.expected.Spec.Containers[0].Name = dc
+			if diff := cmp.Diff(tc.expected, pod); diff != "" {
+				t.Error("unexpected diff in generated object: (-want +got):\n", diff)
+			}
 		})
 	}
 }
 
@@ -2510,31 +2504,29 @@ func TestGenerateEphemeralDebugPodCustomProfile(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
-			cmdtesting.WithAlphaEnvs([]cmdutil.FeatureGate{cmdutil.DebugCustomProfile}, t, func(t *testing.T) {
-				var err error
-				kflags := KeepFlags{
-					Labels:         tc.opts.KeepLabels,
-					Annotations:    tc.opts.KeepAnnotations,
-					Liveness:       tc.opts.KeepLiveness,
-					Readiness:      tc.opts.KeepReadiness,
-					Startup:        tc.opts.KeepStartup,
-					InitContainers: tc.opts.KeepInitContainers,
-				}
-				tc.opts.Applier, err = NewProfileApplier(tc.opts.Profile, kflags)
-				if err != nil {
-					t.Fatalf("Fail to create profile applier: %s: %v", tc.opts.Profile, err)
-				}
-				tc.opts.IOStreams = genericiooptions.NewTestIOStreamsDiscard()
+			var err error
+			kflags := KeepFlags{
+				Labels:         tc.opts.KeepLabels,
+				Annotations:    tc.opts.KeepAnnotations,
+				Liveness:       tc.opts.KeepLiveness,
+				Readiness:      tc.opts.KeepReadiness,
+				Startup:        tc.opts.KeepStartup,
+				InitContainers: tc.opts.KeepInitContainers,
+			}
+			tc.opts.Applier, err = NewProfileApplier(tc.opts.Profile, kflags)
+			if err != nil {
+				t.Fatalf("Fail to create profile applier: %s: %v", tc.opts.Profile, err)
+			}
+			tc.opts.IOStreams = genericiooptions.NewTestIOStreamsDiscard()
 
-				pod, ec, err := tc.opts.generateDebugContainer(tc.copyPod)
-				if err != nil {
-					t.Fatalf("Fail to generate node debug pod: %v", err)
-				}
-				tc.expected.Spec.EphemeralContainers[0].Name = ec.Name
-				if diff := cmp.Diff(tc.expected, pod); diff != "" {
-					t.Error("unexpected diff in generated object: (-want +got):\n", diff)
-				}
-			})
+			pod, ec, err := tc.opts.generateDebugContainer(tc.copyPod)
+			if err != nil {
+				t.Fatalf("Fail to generate node debug pod: %v", err)
+			}
+			tc.expected.Spec.EphemeralContainers[0].Name = ec.Name
+			if diff := cmp.Diff(tc.expected, pod); diff != "" {
+				t.Error("unexpected diff in generated object: (-want +got):\n", diff)
+			}
 		})
 	}
 }
 
diff --git a/test/cmd/debug.sh b/test/cmd/debug.sh
index 2163eff90a8..bb49fa2d1fc 100755
--- a/test/cmd/debug.sh
+++ b/test/cmd/debug.sh
@@ -567,3 +567,95 @@ run_kubectl_debug_netadmin_node_tests() {
   set +o nounset
   set +o errexit
 }
+
+run_kubectl_debug_custom_profile_tests() {
+  set -o nounset
+  set -o errexit
+
+  create_and_use_new_namespace
+  kube::log::status "Testing kubectl debug custom profile"
+
+  ### Pod Troubleshooting by ephemeral containers with custom profile
+  # Pre-Condition: Pod "target-debug" is created
+  kubectl run target-debug "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target-debug:'
+
+  cat > "${TMPDIR:-/tmp}"/custom_profile.json << EOF
+{
+  "env": [
+    {
+      "name": "ENV_VAR1",
+      "value": "value1"
+    },
+    {
+      "name": "ENV_VAR2",
+      "value": "value2"
+    }
+  ]
+}
+EOF
+
+  cat > "${TMPDIR:-/tmp}"/custom_profile.yaml << EOF
+env:
+  - name: ENV_VAR3
+    value: value3
+  - name: ENV_VAR4
+    value: value4
+EOF
+
+  # Command: add a new debug container, customizing the general profile with the JSON file
+  output_message=$(kubectl debug target-debug -it --image=busybox --attach=false -c debug-container --profile=general --custom="${TMPDIR:-/tmp}"/custom_profile.json "${kube_flags[@]:?}")
+
+  # Post-Conditions
+  kube::test::get_object_assert pod/target-debug '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 0).env 0)).name}}' 'ENV_VAR1'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 0).env 0)).value}}' 'value1'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 0).env 1)).name}}' 'ENV_VAR2'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 0).env 1)).value}}' 'value2'
+
+  # Command: add a second debug container, customizing the general profile with the YAML file
+  kubectl debug target-debug -it --image=busybox --attach=false -c debug-container-2 --profile=general --custom="${TMPDIR:-/tmp}"/custom_profile.yaml "${kube_flags[@]:?}"
+
+  # Post-Conditions
+  kube::test::get_object_assert pod/target-debug '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:debug-container-2:'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 1).env 0)).name}}' 'ENV_VAR3'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 1).env 0)).value}}' 'value3'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 1).env 1)).name}}' 'ENV_VAR4'
+  kube::test::get_object_assert pod/target-debug '{{((index (index .spec.ephemeralContainers 1).env 1)).value}}' 'value4'
+
+  # Command: create a copy of target-debug with a new debug container
+  kubectl debug target-debug -it --copy-to=target-copy --image=busybox --container=debug-container-3 --attach=false --profile=general --custom="${TMPDIR:-/tmp}"/custom_profile.json "${kube_flags[@]:?}"
+
+  # Post-Conditions
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.name}}:{{end}}' 'target-debug:debug-container-3:'
+  kube::test::get_object_assert pod/target-copy '{{((index (index .spec.containers 1).env 0)).name}}' 'ENV_VAR1'
+  kube::test::get_object_assert pod/target-copy '{{((index (index .spec.containers 1).env 0)).value}}' 'value1'
+  kube::test::get_object_assert pod/target-copy '{{((index (index .spec.containers 1).env 1)).name}}' 'ENV_VAR2'
+  kube::test::get_object_assert pod/target-copy '{{((index (index .spec.containers 1).env 1)).value}}' 'value2'
+
+  # Clean up
+  kubectl delete pod target-copy "${kube_flags[@]:?}"
+  kubectl delete pod target-debug "${kube_flags[@]:?}"
+
+  ### Debug node with custom profile
+  # Pre-Condition: node exists
+  kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
+  # Command: create a new node debugger pod
+  output_message=$(kubectl debug --profile general node/127.0.0.1 --image=busybox --custom="${TMPDIR:-/tmp}"/custom_profile.yaml --attach=false "${kube_flags[@]:?}" -- true)
+  # Post-Conditions
+  kube::test::get_object_assert pod "{{(len .items)}}" '1'
+  debugger=$(kubectl get pod -o go-template="{{(index .items 0)${id_field:?}}}")
+  kube::test::if_has_string "${output_message:?}" "${debugger:?}"
+  kube::test::get_object_assert "pod/${debugger:?}" "{{${image_field:?}}}" 'busybox'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.nodeName}}' '127.0.0.1'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{((index (index .spec.containers 0).env 0)).name}}' 'ENV_VAR3'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{((index (index .spec.containers 0).env 0)).value}}' 'value3'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{((index (index .spec.containers 0).env 1)).name}}' 'ENV_VAR4'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{((index (index .spec.containers 0).env 1)).value}}' 'value4'
+  # Clean up
+  # pod.spec.nodeName is set by kubectl debug node, which causes the delete to hang,
+  # presumably waiting for a kubelet that's not present. Force the delete.
+  kubectl delete --force pod "${debugger:?}" "${kube_flags[@]:?}"
+
+  set +o nounset
+  set +o errexit
+}
diff --git a/test/cmd/legacy-script.sh b/test/cmd/legacy-script.sh
index 10ab751f14b..70e97481051 100755
--- a/test/cmd/legacy-script.sh
+++ b/test/cmd/legacy-script.sh
@@ -1046,6 +1046,7 @@ runTests() {
     record_command run_kubectl_debug_baseline_tests
     record_command run_kubectl_debug_restricted_tests
     record_command run_kubectl_debug_netadmin_tests
+    record_command run_kubectl_debug_custom_profile_tests
   fi
   if kube::test::if_supports_resource "${nodes}" ; then
     record_command run_kubectl_debug_node_tests
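
Note for reviewers: the sketch below is not part of the patch; it is a minimal, standalone illustration of the unmarshal order the debug.go hunk introduces. `json.Unmarshal` is tried first and `sigs.k8s.io/yaml.Unmarshal` is the fallback; since sigs.k8s.io/yaml converts the document to JSON before decoding, the `json` struct tags on `corev1.Container` drive both paths, so one partial spec type covers both file formats. The helper name and file path here are hypothetical.

```go
// Standalone sketch mirroring the fallback in (*DebugOptions).Complete.
package main

import (
	"encoding/json"
	"fmt"
	"os"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/yaml"
)

// loadPartialContainerSpec is a hypothetical helper, not a kubectl API.
func loadPartialContainerSpec(path string) (*corev1.Container, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	spec := &corev1.Container{}
	// Try JSON first, as debug.go does, so existing JSON profiles
	// keep their current behavior and error text.
	if jsonErr := json.Unmarshal(data, spec); jsonErr != nil {
		// Fall back to YAML. sigs.k8s.io/yaml converts YAML to JSON
		// internally, so the corev1.Container json tags still apply.
		if yamlErr := yaml.Unmarshal(data, spec); yamlErr != nil {
			return nil, fmt.Errorf("%s does not contain a valid container spec: %w", path, yamlErr)
		}
	}
	return spec, nil
}

func main() {
	// Either of the files created in test/cmd/debug.sh would parse here.
	spec, err := loadPartialContainerSpec("custom_profile.yaml")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("env overrides: %+v\n", spec.Env)
}
```

One consequence of trying JSON first: a malformed JSON file is reported with the YAML parser's error rather than the JSON one, since the YAML fallback runs (and fails) after the JSON attempt.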