mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-21 10:51:29 +00:00
Merge pull request #96878 from knight42/feat/kubectl-strip-managed-fields
Enable kubectl-get to strip managed fields
This commit is contained in:
commit
e2f018dcda
@ -36,6 +36,7 @@ func (f *JSONYamlPrintFlags) AllowedFormats() []string {
|
||||
// JSONYamlPrintFlags holds the flag values that control JSON/YAML output.
// A printer configured from these values can be requested via ToPrinter.
type JSONYamlPrintFlags struct {
	showManagedFields bool
}
|
||||
|
||||
// ToPrinter receives an outputFormat and returns a printer capable of
|
||||
@ -55,12 +56,17 @@ func (f *JSONYamlPrintFlags) ToPrinter(outputFormat string) (printers.ResourcePr
|
||||
return nil, NoCompatiblePrinterError{OutputFormat: &outputFormat, AllowedFormats: f.AllowedFormats()}
|
||||
}
|
||||
|
||||
if !f.showManagedFields {
|
||||
printer = &printers.OmitManagedFieldsPrinter{Delegate: printer}
|
||||
}
|
||||
return printer, nil
|
||||
}
|
||||
|
||||
// AddFlags receives a *cobra.Command reference and binds
|
||||
// flags related to JSON or Yaml printing to it
|
||||
func (f *JSONYamlPrintFlags) AddFlags(c *cobra.Command) {}
|
||||
func (f *JSONYamlPrintFlags) AddFlags(c *cobra.Command) {
|
||||
c.Flags().BoolVar(&f.showManagedFields, "show-managed-fields", f.showManagedFields, "If true, keep the managedFields when printing objects in JSON or YAML format.")
|
||||
}
|
||||
|
||||
// NewJSONYamlPrintFlags returns flags associated with
|
||||
// yaml or json printing, with default values set.
|
||||
|
@ -8,6 +8,7 @@ go_library(
|
||||
"interface.go",
|
||||
"json.go",
|
||||
"jsonpath.go",
|
||||
"managedfields.go",
|
||||
"name.go",
|
||||
"sourcechecker.go",
|
||||
"tableprinter.go",
|
||||
@ -39,6 +40,7 @@ go_test(
|
||||
srcs = [
|
||||
"json_test.go",
|
||||
"jsonpath_test.go",
|
||||
"managedfields_test.go",
|
||||
"sourcechecker_test.go",
|
||||
"tableprinter_test.go",
|
||||
"template_test.go",
|
||||
@ -54,6 +56,7 @@ go_test(
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/require:go_default_library",
|
||||
"//vendor/sigs.k8s.io/yaml:go_default_library",
|
||||
],
|
||||
)
|
||||
|
59
staging/src/k8s.io/cli-runtime/pkg/printers/managedfields.go
Normal file
59
staging/src/k8s.io/cli-runtime/pkg/printers/managedfields.go
Normal file
@ -0,0 +1,59 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package printers
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// OmitManagedFieldsPrinter wraps an existing printer and omits the managed fields from the object
|
||||
// before printing it.
|
||||
type OmitManagedFieldsPrinter struct {
|
||||
Delegate ResourcePrinter
|
||||
}
|
||||
|
||||
var _ ResourcePrinter = (*OmitManagedFieldsPrinter)(nil)
|
||||
|
||||
func omitManagedFields(o runtime.Object) runtime.Object {
|
||||
a, err := meta.Accessor(o)
|
||||
if err != nil {
|
||||
// The object is not a `metav1.Object`, ignore it.
|
||||
return o
|
||||
}
|
||||
a.SetManagedFields(nil)
|
||||
return o
|
||||
}
|
||||
|
||||
// PrintObj copies the object and omits the managed fields from the copied object before printing it.
|
||||
func (p *OmitManagedFieldsPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
|
||||
if obj == nil {
|
||||
return p.Delegate.PrintObj(obj, w)
|
||||
}
|
||||
if meta.IsListType(obj) {
|
||||
obj = obj.DeepCopyObject()
|
||||
_ = meta.EachListItem(obj, func(item runtime.Object) error {
|
||||
omitManagedFields(item)
|
||||
return nil
|
||||
})
|
||||
} else if _, err := meta.Accessor(obj); err == nil {
|
||||
obj = omitManagedFields(obj.DeepCopyObject())
|
||||
}
|
||||
return p.Delegate.PrintObj(obj, w)
|
||||
}
|
@ -0,0 +1,106 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package printers
|
||||
|
||||
import (
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
type testResourcePrinter func(object runtime.Object, writer io.Writer) error
|
||||
|
||||
func (p testResourcePrinter) PrintObj(o runtime.Object, w io.Writer) error {
|
||||
return p(o, w)
|
||||
}
|
||||
|
||||
func TestOmitManagedFieldsPrinter(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
object runtime.Object
|
||||
expected runtime.Object
|
||||
}{
|
||||
{
|
||||
name: "pod without managedFields",
|
||||
object: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pod1"},
|
||||
},
|
||||
expected: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pod1"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "pod with managedFields",
|
||||
object: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod1",
|
||||
ManagedFields: []metav1.ManagedFieldsEntry{
|
||||
{Manager: "kubectl", Operation: metav1.ManagedFieldsOperationApply},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pod1"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "pod list",
|
||||
object: &v1.PodList{
|
||||
Items: []v1.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod1",
|
||||
ManagedFields: []metav1.ManagedFieldsEntry{},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod2",
|
||||
ManagedFields: []metav1.ManagedFieldsEntry{
|
||||
{Manager: "kubectl", Operation: metav1.ManagedFieldsOperationApply},
|
||||
},
|
||||
},
|
||||
},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "pod3"}},
|
||||
},
|
||||
},
|
||||
expected: &v1.PodList{
|
||||
Items: []v1.Pod{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "pod2"}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "pod3"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
r := require.New(t)
|
||||
delegate := func(o runtime.Object, w io.Writer) error {
|
||||
r.Equal(tc.expected, o)
|
||||
return nil
|
||||
}
|
||||
p := OmitManagedFieldsPrinter{Delegate: testResourcePrinter(delegate)}
|
||||
r.NoError(p.PrintObj(tc.object, nil))
|
||||
})
|
||||
}
|
||||
}
|
@ -35,7 +35,7 @@ run_kubectl_apply_tests() {
|
||||
# Post-Condition: pod "test-pod" has configuration annotation
|
||||
grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")"
|
||||
# pod has field manager for kubectl client-side apply
|
||||
output_message=$(kubectl get -f hack/testdata/pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
output_message=$(kubectl get --show-managed-fields -f hack/testdata/pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'kubectl-client-side-apply'
|
||||
# Clean up
|
||||
kubectl delete pods test-pod "${kube_flags[@]:?}"
|
||||
@ -367,7 +367,7 @@ run_kubectl_server_side_apply_tests() {
|
||||
# Post-Condition: pod "test-pod" is created
|
||||
kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
|
||||
# pod has field manager for kubectl server-side apply
|
||||
output_message=$(kubectl get -f hack/testdata/pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
output_message=$(kubectl get --show-managed-fields -f hack/testdata/pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'kubectl'
|
||||
# pod has custom field manager
|
||||
kubectl apply --server-side --field-manager=my-field-manager --force-conflicts -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
|
||||
|
@ -43,7 +43,7 @@ run_daemonset_tests() {
|
||||
kubectl set resources daemonsets/bind "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
|
||||
kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '4'
|
||||
# pod has field for kubectl set field manager
|
||||
output_message=$(kubectl get daemonsets bind -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
output_message=$(kubectl get daemonsets bind --show-managed-fields -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'kubectl-set'
|
||||
|
||||
# Rollout restart should change generation
|
||||
@ -340,7 +340,7 @@ run_deployment_tests() {
|
||||
rs="$(kubectl get rs "${newrs}" -o yaml)"
|
||||
kube::test::if_has_string "${rs}" "deployment.kubernetes.io/revision: \"6\""
|
||||
# Deployment has field for kubectl rollout field manager
|
||||
output_message=$(kubectl get deployment nginx -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
output_message=$(kubectl get deployment nginx --show-managed-fields -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'kubectl-rollout'
|
||||
# Create second deployment
|
||||
${SED} "s/name: nginx$/name: nginx2/" hack/testdata/deployment-revision1.yaml | kubectl create -f - "${kube_flags[@]:?}"
|
||||
@ -662,7 +662,7 @@ run_rs_tests() {
|
||||
kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '5'
|
||||
|
||||
# RS has field for kubectl set field manager
|
||||
output_message=$(kubectl get rs frontend -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
output_message=$(kubectl get rs frontend --show-managed-fields -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'kubectl-set'
|
||||
|
||||
### Delete replica set with id
|
||||
|
@ -81,7 +81,7 @@ run_pod_tests() {
|
||||
kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod'
|
||||
kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod'
|
||||
# pod has field manager for kubectl create
|
||||
output_message=$(kubectl get -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
output_message=$(kubectl get --show-managed-fields -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'kubectl-create'
|
||||
# Repeat above test using jsonpath template
|
||||
kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod'
|
||||
@ -1014,7 +1014,7 @@ run_service_tests() {
|
||||
# Show dry-run works on running selector
|
||||
kubectl set selector services redis-master role=padawan --dry-run=client -o yaml "${kube_flags[@]}"
|
||||
kubectl set selector services redis-master role=padawan --dry-run=server -o yaml "${kube_flags[@]}"
|
||||
output_message=$(kubectl get services redis-master -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
output_message=$(kubectl get services redis-master --show-managed-fields -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'kubectl-set'
|
||||
! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}" || exit 1
|
||||
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
|
||||
|
@ -88,7 +88,7 @@ __EOF__
|
||||
kubectl taint node 127.0.0.1 dedicated:PreferNoSchedule
|
||||
kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "dedicated=<no value>:PreferNoSchedule"
|
||||
# Node has field manager for kubectl taint
|
||||
output_message=$(kubectl get node 127.0.0.1 -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
output_message=$(kubectl get node 127.0.0.1 --show-managed-fields -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'kubectl-taint'
|
||||
# Dry-run remove a taint
|
||||
kubectl taint node 127.0.0.1 --dry-run=client dedicated-
|
||||
|
Loading…
Reference in New Issue
Block a user