Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-29 06:27:05 +00:00

Merge pull request #112905 from alexzielenski/kubectl-apply-csa-migration

kubectl: `apply --server-side` managed fields migration

This commit is contained in: commit 2f837dc113

staging/src/k8s.io/client-go/util/csaupgrade/OWNERS (new file, +10)
@@ -0,0 +1,10 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+approvers:
+  - apelisse
+  - alexzielenski
+reviewers:
+  - apelisse
+  - alexzielenski
+  - KnVerey
+labels:
+  - sig/api-machinery
@@ -18,17 +18,45 @@ package csaupgrade

 import (
 	"bytes"
+	"encoding/json"
+	"errors"
 	"fmt"
+	"reflect"
+
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
 )

-const csaAnnotationName = "kubectl.kubernetes.io/last-applied-configuration"
+// Finds all managed fields owners of the given operation type which own all of
+// the fields in the given set.
+//
+// If there is an error decoding one of the fieldsets for any reason, it is
+// ignored and assumed not to match the query.
+func FindFieldsOwners(
+	managedFields []metav1.ManagedFieldsEntry,
+	operation metav1.ManagedFieldsOperationType,
+	fields *fieldpath.Set,
+) []metav1.ManagedFieldsEntry {
+	var result []metav1.ManagedFieldsEntry
+	for _, entry := range managedFields {
+		if entry.Operation != operation {
+			continue
+		}

-var csaAnnotationFieldSet = fieldpath.NewSet(fieldpath.MakePathOrDie("metadata", "annotations", csaAnnotationName))
+		fieldSet, err := decodeManagedFieldsEntrySet(entry)
+		if err != nil {
+			continue
+		}
+
+		if fields.Difference(&fieldSet).Empty() {
+			result = append(result, entry)
+		}
+	}
+	return result
+}

 // Upgrades the Manager information for fields managed with client-side-apply (CSA)
 // Prepares fields owned by `csaManager` for 'Update' operations for use now
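FindFieldsOwners matches by subset: an entry is selected when the queried field set, minus the set the entry owns, is empty. A minimal runnable sketch of calling it, with an illustrative entry and query (not taken from this patch):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/csaupgrade"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

func main() {
	// Entries would normally come from accessor.GetManagedFields().
	entries := []metav1.ManagedFieldsEntry{{
		Manager:    "kubectl-client-side-apply",
		Operation:  metav1.ManagedFieldsOperationUpdate,
		APIVersion: "v1",
		FieldsType: "FieldsV1",
		// FieldsV1 notation: "f:" prefixes a field, "." marks the parent itself.
		FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:data":{".":{},"f:key":{}}}`)},
	}}

	// Which Update-operation managers own every field in {data.key}?
	owners := csaupgrade.FindFieldsOwners(
		entries,
		metav1.ManagedFieldsOperationUpdate,
		fieldpath.NewSet(fieldpath.MakePathOrDie("data", "key")),
	)
	for _, o := range owners {
		fmt.Println(o.Manager) // kubectl-client-side-apply
	}
}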
@@ -48,21 +76,107 @@ var csaAnnotationFieldSet = fieldpath.NewSet(fieldpath.MakePathOrDie("metadata",
 // have changed before sending a patch.
 //
 // obj - Target of the operation which has been managed with CSA in the past
-// csaManagerName - Name of FieldManager formerly used for `Update` operations
-// ssaManagerName - Name of FieldManager formerly used for `Apply` operations
+// csaManagerNames - Names of FieldManagers to merge into ssaManagerName
+// ssaManagerName - Name of FieldManager to be used for `Apply` operations
 func UpgradeManagedFields(
 	obj runtime.Object,
-	csaManagerName string,
+	csaManagerNames sets.Set[string],
 	ssaManagerName string,
 ) error {
 	accessor, err := meta.Accessor(obj)
 	if err != nil {
-		return fmt.Errorf("error accessing object metadata: %w", err)
+		return err
 	}
+
+	filteredManagers := accessor.GetManagedFields()
+
+	for csaManagerName := range csaManagerNames {
+		filteredManagers, err = upgradedManagedFields(
+			filteredManagers, csaManagerName, ssaManagerName)
+
+		if err != nil {
+			return err
+		}
+	}
+
+	// Commit changes to object
+	accessor.SetManagedFields(filteredManagers)
+	return nil
+}
+
+// Calculates a minimal JSON patch to send to upgrade managed fields.
+// See `UpgradeManagedFields` for more information.
+//
+// obj - Target of the operation which has been managed with CSA in the past
+// csaManagerNames - Names of FieldManagers to merge into ssaManagerName
+// ssaManagerName - Name of FieldManager to be used for `Apply` operations
+//
+// Returns a non-nil error if there was an error, otherwise a JSON patch, or
+// nil bytes if there is no work to be done.
+func UpgradeManagedFieldsPatch(
+	obj runtime.Object,
+	csaManagerNames sets.Set[string],
+	ssaManagerName string) ([]byte, error) {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return nil, err
+	}
+
+	managedFields := accessor.GetManagedFields()
+	filteredManagers := accessor.GetManagedFields()
+	for csaManagerName := range csaManagerNames {
+		filteredManagers, err = upgradedManagedFields(
+			filteredManagers, csaManagerName, ssaManagerName)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if reflect.DeepEqual(managedFields, filteredManagers) {
+		// If the managed fields have not changed from the transformed version,
+		// there is no patch to perform
+		return nil, nil
+	}
+
+	// Create a patch with a diff between old and new objects.
+	// Just include all managed fields since that is the only thing that will change.
+	//
+	// Also include a test on the resource version to avoid a race condition.
+	jsonPatch := []map[string]interface{}{
+		{
+			"op":    "replace",
+			"path":  "/metadata/managedFields",
+			"value": filteredManagers,
+		},
+		{
+			// Use "replace" instead of "test" operation so that etcd rejects with
+			// a 409 conflict instead of the apiserver with an invalid request
+			"op":    "replace",
+			"path":  "/metadata/resourceVersion",
+			"value": accessor.GetResourceVersion(),
+		},
+	}
+
+	return json.Marshal(jsonPatch)
+}
+
+// Returns a copy of the provided managed fields that has been migrated from
+// client-side-apply to server-side-apply, or an error if there was an issue.
+func upgradedManagedFields(
+	managedFields []metav1.ManagedFieldsEntry,
+	csaManagerName string,
+	ssaManagerName string,
+) ([]metav1.ManagedFieldsEntry, error) {
+	if managedFields == nil {
+		return nil, nil
 	}

 	// Create managed fields clone since we modify the values
-	var managedFields []metav1.ManagedFieldsEntry
-	managedFields = append(managedFields, accessor.GetManagedFields()...)
+	managedFieldsCopy := make([]metav1.ManagedFieldsEntry, len(managedFields))
+	if copy(managedFieldsCopy, managedFields) != len(managedFields) {
+		return nil, errors.New("failed to copy managed fields")
+	}
+	managedFields = managedFieldsCopy

 	// Locate SSA manager
 	replaceIndex, managerExists := findFirstIndex(managedFields,
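For context, a sketch of how a caller might drive UpgradeManagedFieldsPatch, including the no-op path; the object construction and manager names here are illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/util/csaupgrade"
)

func main() {
	// obj would normally be fetched from the server; a bare ConfigMap-shaped
	// object with no managed fields is used here, so the migration is a no-op.
	obj := &unstructured.Unstructured{}
	obj.SetAPIVersion("v1")
	obj.SetKind("ConfigMap")
	obj.SetResourceVersion("1")

	patch, err := csaupgrade.UpgradeManagedFieldsPatch(
		obj,
		sets.New("kubectl-client-side-apply"), // CSA managers to fold in
		"kubectl",                             // target SSA manager
	)
	if err != nil {
		panic(err)
	}
	if patch == nil {
		// No CSA entries to migrate, so there is nothing to send.
		fmt.Println("no-op")
		return
	}
	// Otherwise patch is a JSON patch of the form:
	// [{"op":"replace","path":"/metadata/managedFields","value":[...]},
	//  {"op":"replace","path":"/metadata/resourceVersion","value":"1"}]
	fmt.Println(string(patch))
}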
@@ -88,16 +202,16 @@ func UpgradeManagedFields(
 	if !managerExists {
 		// There are no CSA managers that need to be converted. Nothing to do
 		// Return early
-		return nil
+		return managedFields, nil
 	}

 	// Convert CSA manager into SSA manager
 	managedFields[replaceIndex].Operation = metav1.ManagedFieldsOperationApply
 	managedFields[replaceIndex].Manager = ssaManagerName
 	}
-	err = unionManagerIntoIndex(managedFields, replaceIndex, csaManagerName)
+	err := unionManagerIntoIndex(managedFields, replaceIndex, csaManagerName)
 	if err != nil {
-		return err
+		return nil, err
 	}

 	// Create version of managed fields which has no CSA managers with the given name
@@ -107,21 +221,17 @@ func UpgradeManagedFields(
 		entry.Subresource == "")
 	})

-	// Wipe out last-applied-configuration annotation if it exists
-	annotations := accessor.GetAnnotations()
-	delete(annotations, csaAnnotationName)
-
-	// Commit changes to object
-	accessor.SetAnnotations(annotations)
-	accessor.SetManagedFields(filteredManagers)
-
-	return nil
+	return filteredManagers, nil
 }

 // Locates an Update manager entry named `csaManagerName` with the same APIVersion
 // as the manager at the targetIndex. Unions both managers' fields together
 // into the manager specified by `targetIndex`. No other managers are modified.
-func unionManagerIntoIndex(entries []metav1.ManagedFieldsEntry, targetIndex int, csaManagerName string) error {
+func unionManagerIntoIndex(
+	entries []metav1.ManagedFieldsEntry,
+	targetIndex int,
+	csaManagerName string,
+) error {
 	ssaManager := entries[targetIndex]

 	// find Update manager of same APIVersion, union ssa fields with it.
@@ -130,6 +240,8 @@ func unionManagerIntoIndex(entries []metav1.ManagedFieldsEntry, targetIndex int,
 		func(entry metav1.ManagedFieldsEntry) bool {
 			return entry.Manager == csaManagerName &&
 				entry.Operation == metav1.ManagedFieldsOperationUpdate &&
+				//!TODO: some users may want to migrate subresources.
+				// should thread through the args at some point.
 				entry.Subresource == "" &&
 				entry.APIVersion == ssaManager.APIVersion
 		})
@@ -154,10 +266,6 @@ func unionManagerIntoIndex(entries []metav1.ManagedFieldsEntry, targetIndex int,
 		combinedFieldSet = combinedFieldSet.Union(&csaFieldSet)
 	}

-	// Ensure that the resultant fieldset does not include the
-	// last applied annotation
-	combinedFieldSet = combinedFieldSet.Difference(csaAnnotationFieldSet)
-
 	// Encode the fields back to the serialized format
 	err = encodeManagedFieldsEntrySet(&entries[targetIndex], *combinedFieldSet)
 	if err != nil {
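UpgradeManagedFields mutates the object in place, while UpgradeManagedFieldsPatch computes a patch without mutating. A minimal runnable sketch of the in-place form on an unstructured object; the manifest and manager names are illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/yaml"
	"k8s.io/client-go/util/csaupgrade"
)

// One CSA (Update) entry; after the upgrade it is owned by the
// "kubectl" Apply manager instead.
const manifest = `
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:data:
        .: {}
        f:key: {}
    manager: kubectl-client-side-apply
    operation: Update
`

func main() {
	obj := &unstructured.Unstructured{}
	if err := yaml.Unmarshal([]byte(manifest), &obj.Object); err != nil {
		panic(err)
	}
	if err := csaupgrade.UpgradeManagedFields(
		obj, sets.New("kubectl-client-side-apply"), "kubectl",
	); err != nil {
		panic(err)
	}
	for _, e := range obj.GetManagedFields() {
		fmt.Println(e.Manager, e.Operation) // kubectl Apply
	}
}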
@@ -17,20 +17,282 @@ limitations under the License.
 package csaupgrade_test

 import (
+	"encoding/json"
 	"reflect"
 	"testing"

+	jsonpatch "github.com/evanphx/json-patch"
 	"github.com/google/go-cmp/cmp"
+	"github.com/stretchr/testify/require"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/yaml"
 	"k8s.io/client-go/util/csaupgrade"
+	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
 )

+func TestFindOwners(t *testing.T) {
+	testCases := []struct {
+		Name              string
+		ManagedFieldsYAML string
+		Operation         metav1.ManagedFieldsOperationType
+		Fields            *fieldpath.Set
+		Expectation       []string
+	}{
+		{
+			// Find a root field path owner
+			Name: "Basic",
+			ManagedFieldsYAML: `
+managedFields:
+- apiVersion: v1
+  fieldsType: FieldsV1
+  fieldsV1:
+    f:data:
+      .: {}
+      f:key: {}
+      f:legacy: {}
+    f:metadata:
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
+  manager: kubectl-client-side-apply
+  operation: Update
+  time: "2022-08-22T23:08:23Z"
+`,
+			Operation:   metav1.ManagedFieldsOperationUpdate,
+			Fields:      fieldpath.NewSet(fieldpath.MakePathOrDie("data")),
+			Expectation: []string{"kubectl-client-side-apply"},
+		},
+		{
+			// Find a fieldpath nested inside another field
+			Name: "Nested",
+			ManagedFieldsYAML: `
+managedFields:
+- apiVersion: v1
+  fieldsType: FieldsV1
+  fieldsV1:
+    f:data:
+      .: {}
+      f:key: {}
+      f:legacy: {}
+    f:metadata:
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
+  manager: kubectl-client-side-apply
+  operation: Update
+  time: "2022-08-22T23:08:23Z"
+`,
+			Operation:   metav1.ManagedFieldsOperationUpdate,
+			Fields:      fieldpath.NewSet(fieldpath.MakePathOrDie("metadata", "annotations", "kubectl.kubernetes.io/last-applied-configuration")),
+			Expectation: []string{"kubectl-client-side-apply"},
+		},
+		{
+			// Search for an operation/fieldpath combination that is not found on both
+			// axes
+			Name: "NotFound",
+			ManagedFieldsYAML: `
+managedFields:
+- apiVersion: v1
+  fieldsType: FieldsV1
+  fieldsV1:
+    f:data:
+      .: {}
+      f:key: {}
+      f:legacy: {}
+    f:metadata:
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
+  manager: kubectl
+  operation: Apply
+  time: "2022-08-23T23:08:23Z"
+`,
+			Operation:   metav1.ManagedFieldsOperationUpdate,
+			Fields:      fieldpath.NewSet(fieldpath.MakePathOrDie("metadata", "annotations", "kubectl.kubernetes.io/last-applied-configuration")),
+			Expectation: []string{},
+		},
+		{
+			// Test using apply operation
+			Name: "ApplyOperation",
+			ManagedFieldsYAML: `
+managedFields:
+- apiVersion: v1
+  fieldsType: FieldsV1
+  fieldsV1:
+    f:data:
+      .: {}
+      f:key: {}
+      f:legacy: {}
+    f:metadata:
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
+  manager: kubectl
+  operation: Apply
+  time: "2022-08-23T23:08:23Z"
+`,
+			Operation:   metav1.ManagedFieldsOperationApply,
+			Fields:      fieldpath.NewSet(fieldpath.MakePathOrDie("metadata", "annotations", "kubectl.kubernetes.io/last-applied-configuration")),
+			Expectation: []string{"kubectl"},
+		},
+		{
+			// Of multiple field managers, match a single one
+			Name: "OneOfMultiple",
+			ManagedFieldsYAML: `
+managedFields:
+- apiVersion: v1
+  fieldsType: FieldsV1
+  fieldsV1:
+    f:metadata:
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
+  manager: kubectl-client-side-apply
+  operation: Update
+  time: "2022-08-23T23:08:23Z"
+- apiVersion: v1
+  fieldsType: FieldsV1
+  fieldsV1:
+    f:data:
+      .: {}
+      f:key: {}
+      f:legacy: {}
+  manager: kubectl
+  operation: Apply
+  time: "2022-08-23T23:08:23Z"
+`,
+			Operation:   metav1.ManagedFieldsOperationUpdate,
+			Fields:      fieldpath.NewSet(fieldpath.MakePathOrDie("metadata", "annotations", "kubectl.kubernetes.io/last-applied-configuration")),
+			Expectation: []string{"kubectl-client-side-apply"},
+		},
+		{
+			// Have multiple field managers, and match more than one but not all of them
+			Name: "ManyOfMultiple",
+			ManagedFieldsYAML: `
+managedFields:
+- apiVersion: v1
+  fieldsType: FieldsV1
+  fieldsV1:
+    f:metadata:
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
+  manager: kubectl-client-side-apply
+  operation: Update
+  time: "2022-08-23T23:08:23Z"
+- apiVersion: v1
+  fieldsType: FieldsV1
+  fieldsV1:
+    f:metadata:
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
+    f:data:
+      .: {}
+      f:key: {}
+      f:legacy: {}
+  manager: kubectl
+  operation: Apply
+  time: "2022-08-23T23:08:23Z"
+- apiVersion: v1
+  fieldsType: FieldsV1
+  fieldsV1:
+    f:metadata:
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
+  manager: kubectl-client-side-apply2
+  operation: Update
+  time: "2022-08-23T23:08:23Z"
+`,
+			Operation:   metav1.ManagedFieldsOperationUpdate,
+			Fields:      fieldpath.NewSet(fieldpath.MakePathOrDie("metadata", "annotations", "kubectl.kubernetes.io/last-applied-configuration")),
+			Expectation: []string{"kubectl-client-side-apply", "kubectl-client-side-apply2"},
+		},
+		{
+			// Test with multiple fields to match against
+			Name: "BasicMultipleFields",
+			ManagedFieldsYAML: `
+managedFields:
+- apiVersion: v1
+  fieldsType: FieldsV1
+  fieldsV1:
+    f:metadata:
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
+    f:data:
+      .: {}
+      f:key: {}
+      f:legacy: {}
+  manager: kubectl-client-side-apply
+  operation: Update
+  time: "2022-08-23T23:08:23Z"
+`,
+			Operation: metav1.ManagedFieldsOperationUpdate,
+			Fields: fieldpath.NewSet(
+				fieldpath.MakePathOrDie("data", "key"),
+				fieldpath.MakePathOrDie("metadata", "annotations", "kubectl.kubernetes.io/last-applied-configuration"),
+			),
+			Expectation: []string{"kubectl-client-side-apply"},
+		},
+		{
+			// Test with multiple fields but the manager is missing one of the fields
+			// requested so it does not match
+			Name: "MissingOneField",
+			ManagedFieldsYAML: `
+managedFields:
+- apiVersion: v1
+  fieldsType: FieldsV1
+  fieldsV1:
+    f:metadata:
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
+    f:data:
+      .: {}
+      f:legacy: {}
+  manager: kubectl-client-side-apply
+  operation: Update
+  time: "2022-08-23T23:08:23Z"
+`,
+			Operation: metav1.ManagedFieldsOperationUpdate,
+			Fields: fieldpath.NewSet(
+				fieldpath.MakePathOrDie("data", "key"),
+				fieldpath.MakePathOrDie("metadata", "annotations", "kubectl.kubernetes.io/last-applied-configuration"),
+			),
+			Expectation: []string{},
+		},
+	}
+	for _, tcase := range testCases {
+		t.Run(tcase.Name, func(t *testing.T) {
+			var entries struct {
+				ManagedFields []metav1.ManagedFieldsEntry `json:"managedFields"`
+			}
+			err := yaml.Unmarshal([]byte(tcase.ManagedFieldsYAML), &entries)
+			require.NoError(t, err)
+
+			result := csaupgrade.FindFieldsOwners(entries.ManagedFields, tcase.Operation, tcase.Fields)
+
+			// Compare owner names since they uniquely identify the selected entries
+			// (given that the operation is provided)
+			ownerNames := []string{}
+			for _, entry := range result {
+				ownerNames = append(ownerNames, entry.Manager)
+				require.Equal(t, tcase.Operation, entry.Operation)
+			}
+			require.ElementsMatch(t, tcase.Expectation, ownerNames)
+		})
+	}
+}
+
 func TestUpgradeCSA(t *testing.T) {

 	cases := []struct {
 		Name           string
-		CSAManager     string
+		CSAManagers    []string
 		SSAManager     string
 		OriginalObject []byte
 		ExpectedObject []byte
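The fixtures above use FieldsV1 notation: `f:` prefixes a field name and `.` marks ownership of the parent node itself. A sketch of how such an entry decodes into a fieldpath.Set, assuming structured-merge-diff's Set.FromJSON decoder (which is what an internal helper like decodeManagedFieldsEntrySet would plausibly use):

package main

import (
	"bytes"
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

func main() {
	// JSON equivalent of the YAML fixture:
	//   f:data:
	//     .: {}
	//     f:key: {}
	raw := []byte(`{"f:data":{".":{},"f:key":{}}}`)

	set := &fieldpath.Set{}
	if err := set.FromJSON(bytes.NewReader(raw)); err != nil {
		panic(err)
	}

	// The "Basic" case queries the "data" root path; an owner matches
	// when query.Difference(owned) is empty.
	query := fieldpath.NewSet(fieldpath.MakePathOrDie("data"))
	fmt.Println(query.Difference(set).Empty()) // true
}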
@@ -39,14 +301,15 @@ func TestUpgradeCSA(t *testing.T) {
 			// Case where there is a CSA entry with the given name, but no SSA entry
 			// is found. Expect that the CSA entry is converted to an SSA entry
 			// and renamed.
 			Name:        "csa-basic-direct-conversion",
-			CSAManager:  "kubectl-client-side-apply",
+			CSAManagers: []string{"kubectl-client-side-apply"},
 			SSAManager:  "kubectl",
 			OriginalObject: []byte(`
 apiVersion: v1
 data: {}
 kind: ConfigMap
 metadata:
+  resourceVersion: "1"
   annotations:
     kubectl.kubernetes.io/last-applied-configuration: |
       {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
@@ -74,7 +337,10 @@ apiVersion: v1
 data: {}
 kind: ConfigMap
 metadata:
-  annotations: {}
+  resourceVersion: "1"
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
   creationTimestamp: "2022-08-22T23:08:23Z"
   managedFields:
   - apiVersion: v1
@@ -85,12 +351,14 @@ metadata:
       f:key: {}
       f:legacy: {}
     f:metadata:
-      f:annotations: {}
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
     manager: kubectl
     operation: Apply
     time: "2022-08-22T23:08:23Z"
   name: test
   namespace: default
 `),
 		},
 		{
@@ -98,14 +366,15 @@ metadata:
 			// Server creates duplicate managed fields entry - one for Update and another
 			// for Apply. Expect entries to be merged into one entry, which is unchanged
 			// from initial SSA.
 			Name:        "csa-combine-with-ssa-duplicate-keys",
-			CSAManager:  "kubectl-client-side-apply",
+			CSAManagers: []string{"kubectl-client-side-apply"},
 			SSAManager:  "kubectl",
 			OriginalObject: []byte(`
 apiVersion: v1
 data: {}
 kind: ConfigMap
 metadata:
+  resourceVersion: "1"
   annotations:
     kubectl.kubernetes.io/last-applied-configuration: |
       {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
@@ -147,7 +416,10 @@ apiVersion: v1
 data: {}
 kind: ConfigMap
 metadata:
-  annotations: {}
+  resourceVersion: "1"
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
   creationTimestamp: "2022-08-22T23:08:23Z"
   managedFields:
   - apiVersion: v1
@@ -158,12 +430,14 @@ metadata:
       f:key: {}
       f:legacy: {}
     f:metadata:
-      f:annotations: {}
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
     manager: kubectl
     operation: Apply
     time: "2022-08-23T23:08:23Z"
   name: test
   namespace: default
 `),
 		},
 		{
@@ -173,14 +447,15 @@ metadata:
 			// This shows that upgrading such an object results in correct behavior the
 			// next time SSA is applied.
 			// Expect final object to have unioned keys from both entries
 			Name:        "csa-combine-with-ssa-additional-keys",
-			CSAManager:  "kubectl-client-side-apply",
+			CSAManagers: []string{"kubectl-client-side-apply"},
 			SSAManager:  "kubectl",
 			OriginalObject: []byte(`
 apiVersion: v1
 data: {}
 kind: ConfigMap
 metadata:
+  resourceVersion: "1"
   annotations:
     kubectl.kubernetes.io/last-applied-configuration: |
       {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
@@ -221,7 +496,10 @@ apiVersion: v1
 data: {}
 kind: ConfigMap
 metadata:
-  annotations: {}
+  resourceVersion: "1"
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
   creationTimestamp: "2022-08-22T23:08:23Z"
   managedFields:
   - apiVersion: v1
@@ -232,26 +510,29 @@ metadata:
       f:key: {}
       f:legacy: {}
     f:metadata:
-      f:annotations: {}
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
     manager: kubectl
     operation: Apply
     time: "2022-08-23T23:08:23Z"
   name: test
   namespace: default
 `),
 		},
 		{
 			// Case when there are multiple CSA versions on the object which do not
 			// match the version from the apply entry. Shows they are tossed away
 			// without being merged.
 			Name:        "csa-no-applicable-version",
-			CSAManager:  "kubectl-client-side-apply",
+			CSAManagers: []string{"kubectl-client-side-apply"},
 			SSAManager:  "kubectl",
 			OriginalObject: []byte(`
 apiVersion: v1
 data: {}
 kind: ConfigMap
 metadata:
+  resourceVersion: "1"
   annotations:
     kubectl.kubernetes.io/last-applied-configuration: |
       {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
@@ -323,7 +604,10 @@ apiVersion: v1
 data: {}
 kind: ConfigMap
 metadata:
-  annotations: {}
+  resourceVersion: "1"
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
   creationTimestamp: "2022-08-22T23:08:23Z"
   managedFields:
   - apiVersion: v5
@@ -334,26 +618,29 @@ metadata:
       f:key: {}
       f:legacy: {}
     f:metadata:
-      f:annotations: {}
+      f:annotations:
+        .: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
     manager: kubectl
     operation: Apply
     time: "2022-08-23T23:08:23Z"
   name: test
   namespace: default
 `),
 		},
 		{
 			// Case when there are multiple CSA versions on the object which do not
 			// match the version from the apply entry, and one which does.
 			// Shows that CSA entry with matching version is unioned into the SSA entry.
 			Name:        "csa-single-applicable-version",
-			CSAManager:  "kubectl-client-side-apply",
+			CSAManagers: []string{"kubectl-client-side-apply"},
 			SSAManager:  "kubectl",
 			OriginalObject: []byte(`
 apiVersion: v1
 data: {}
 kind: ConfigMap
 metadata:
+  resourceVersion: "1"
   annotations:
     kubectl.kubernetes.io/last-applied-configuration: |
       {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
@@ -440,11 +730,355 @@ metadata:
       f:annotations:
         .: {}
         f:hello2: {}
+        f:kubectl.kubernetes.io/last-applied-configuration: {}
     manager: kubectl
     operation: Apply
     time: "2022-08-23T23:08:23Z"
   name: test
   namespace: default
+`),
+		},
+		{
+			// Do nothing to object with nothing to migrate and no existing SSA manager
+			Name:        "noop",
+			CSAManagers: []string{"kubectl-client-side-apply"},
+			SSAManager:  "not-already-in-object",
+			OriginalObject: []byte(`
+apiVersion: v1
+data: {}
+kind: ConfigMap
+metadata:
+  resourceVersion: "1"
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
+  creationTimestamp: "2022-08-22T23:08:23Z"
+  managedFields:
+  - apiVersion: v5
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:data:
+        .: {}
+        f:key: {}
+        f:legacy: {}
+      f:metadata:
+        f:annotations:
+          .: {}
+          f:kubectl.kubernetes.io/last-applied-configuration: {}
+    manager: kubectl
+    operation: Apply
+    time: "2022-08-23T23:08:23Z"
+  name: test
+  namespace: default
+`),
+			ExpectedObject: []byte(`
+apiVersion: v1
+data: {}
+kind: ConfigMap
+metadata:
+  resourceVersion: "1"
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
+  creationTimestamp: "2022-08-22T23:08:23Z"
+  managedFields:
+  - apiVersion: v5
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:data:
+        .: {}
+        f:key: {}
+        f:legacy: {}
+      f:metadata:
+        f:annotations:
+          .: {}
+          f:kubectl.kubernetes.io/last-applied-configuration: {}
+    manager: kubectl
+    operation: Apply
+    time: "2022-08-23T23:08:23Z"
+  name: test
+  namespace: default
+`),
+		},
+		{
+			// Expect multiple targets to be merged into existing ssa manager
+			Name:        "multipleTargetsExisting",
+			CSAManagers: []string{"kube-scheduler", "kubectl-client-side-apply"},
+			SSAManager:  "kubectl",
+			OriginalObject: []byte(`
+apiVersion: v1
+data: {}
+kind: ConfigMap
+metadata:
+  resourceVersion: "1"
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
+  creationTimestamp: "2022-08-22T23:08:23Z"
+  managedFields:
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:metadata:
+        f:labels:
+          f:name: {}
+      f:spec:
+        f:containers:
+          k:{"name":"kubernetes-pause"}:
+            .: {}
+            f:image: {}
+            f:name: {}
+    manager: kubectl
+    operation: Apply
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:status:
+        f:conditions:
+          .: {}
+          k:{"type":"PodScheduled"}:
+            .: {}
+            f:lastProbeTime: {}
+            f:lastTransitionTime: {}
+            f:message: {}
+            f:reason: {}
+            f:status: {}
+            f:type: {}
+    manager: kube-scheduler
+    operation: Update
+    time: "2022-11-03T23:22:40Z"
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:metadata:
+        f:annotations:
+          .: {}
+          f:kubectl.kubernetes.io/last-applied-configuration: {}
+        f:labels:
+          .: {}
+          f:name: {}
+      f:spec:
+        f:containers:
+          k:{"name":"kubernetes-pause"}:
+            .: {}
+            f:image: {}
+            f:imagePullPolicy: {}
+            f:name: {}
+            f:resources: {}
+            f:terminationMessagePath: {}
+            f:terminationMessagePolicy: {}
+        f:dnsPolicy: {}
+        f:enableServiceLinks: {}
+        f:restartPolicy: {}
+        f:schedulerName: {}
+        f:securityContext: {}
+        f:terminationGracePeriodSeconds: {}
+    manager: kubectl-client-side-apply
+    operation: Update
+    time: "2022-11-03T23:22:40Z"
+  name: test
+  namespace: default
+`),
+			ExpectedObject: []byte(`
+apiVersion: v1
+data: {}
+kind: ConfigMap
+metadata:
+  resourceVersion: "1"
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
+  creationTimestamp: "2022-08-22T23:08:23Z"
+  managedFields:
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:status:
+        f:conditions:
+          .: {}
+          k:{"type":"PodScheduled"}:
+            .: {}
+            f:lastProbeTime: {}
+            f:lastTransitionTime: {}
+            f:message: {}
+            f:reason: {}
+            f:status: {}
+            f:type: {}
+      f:metadata:
+        f:annotations:
+          .: {}
+          f:kubectl.kubernetes.io/last-applied-configuration: {}
+        f:labels:
+          .: {}
+          f:name: {}
+      f:spec:
+        f:containers:
+          k:{"name":"kubernetes-pause"}:
+            .: {}
+            f:image: {}
+            f:imagePullPolicy: {}
+            f:name: {}
+            f:resources: {}
+            f:terminationMessagePath: {}
+            f:terminationMessagePolicy: {}
+        f:dnsPolicy: {}
+        f:enableServiceLinks: {}
+        f:restartPolicy: {}
+        f:schedulerName: {}
+        f:securityContext: {}
+        f:terminationGracePeriodSeconds: {}
+    manager: kubectl
+    operation: Apply
+  name: test
+  namespace: default
+`),
+		},
+		{
+			// Expect multiple targets to be merged into a new ssa manager
+			Name:        "multipleTargetsNewInsertion",
+			CSAManagers: []string{"kubectl-client-side-apply", "kube-scheduler"},
+			SSAManager:  "newly-inserted-manager",
+			OriginalObject: []byte(`
+apiVersion: v1
+data: {}
+kind: ConfigMap
+metadata:
+  resourceVersion: "1"
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
+  creationTimestamp: "2022-08-22T23:08:23Z"
+  managedFields:
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:metadata:
+        f:labels:
+          f:name: {}
+      f:spec:
+        f:containers:
+          k:{"name":"kubernetes-pause"}:
+            .: {}
+            f:image: {}
+            f:name: {}
+    manager: kubectl
+    operation: Apply
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:status:
+        f:conditions:
+          .: {}
+          k:{"type":"PodScheduled"}:
+            .: {}
+            f:lastProbeTime: {}
+            f:lastTransitionTime: {}
+            f:message: {}
+            f:reason: {}
+            f:status: {}
+            f:type: {}
+    manager: kube-scheduler
+    operation: Update
+    time: "2022-11-03T23:22:40Z"
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:metadata:
+        f:annotations:
+          .: {}
+          f:kubectl.kubernetes.io/last-applied-configuration: {}
+        f:labels:
+          .: {}
+          f:name: {}
+      f:spec:
+        f:containers:
+          k:{"name":"kubernetes-pause"}:
+            .: {}
+            f:image: {}
+            f:imagePullPolicy: {}
+            f:name: {}
+            f:resources: {}
+            f:terminationMessagePath: {}
+            f:terminationMessagePolicy: {}
+        f:dnsPolicy: {}
+        f:enableServiceLinks: {}
+        f:restartPolicy: {}
+        f:schedulerName: {}
+        f:securityContext: {}
+        f:terminationGracePeriodSeconds: {}
+    manager: kubectl-client-side-apply
+    operation: Update
+    time: "2022-11-03T23:22:40Z"
+  name: test
+  namespace: default
+`),
+			ExpectedObject: []byte(`
+apiVersion: v1
+data: {}
+kind: ConfigMap
+metadata:
+  resourceVersion: "1"
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}}
+  creationTimestamp: "2022-08-22T23:08:23Z"
+  managedFields:
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:metadata:
+        f:labels:
+          f:name: {}
+      f:spec:
+        f:containers:
+          k:{"name":"kubernetes-pause"}:
+            .: {}
+            f:image: {}
+            f:name: {}
+    manager: kubectl
+    operation: Apply
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:metadata:
+        f:annotations:
+          .: {}
+          f:kubectl.kubernetes.io/last-applied-configuration: {}
+        f:labels:
+          .: {}
+          f:name: {}
+      f:spec:
+        f:containers:
+          k:{"name":"kubernetes-pause"}:
+            .: {}
+            f:image: {}
+            f:imagePullPolicy: {}
+            f:name: {}
+            f:resources: {}
+            f:terminationMessagePath: {}
+            f:terminationMessagePolicy: {}
+        f:dnsPolicy: {}
+        f:enableServiceLinks: {}
+        f:restartPolicy: {}
+        f:schedulerName: {}
+        f:securityContext: {}
+        f:terminationGracePeriodSeconds: {}
+      f:status:
+        f:conditions:
+          .: {}
+          k:{"type":"PodScheduled"}:
+            .: {}
+            f:lastProbeTime: {}
+            f:lastTransitionTime: {}
+            f:message: {}
+            f:reason: {}
+            f:status: {}
+            f:type: {}
+    manager: newly-inserted-manager
+    operation: Apply
+    time: "2022-11-03T23:22:40Z"
+  name: test
+  namespace: default
 `),
 		},
 	}
@@ -452,7 +1086,7 @@ metadata:
 	for _, testCase := range cases {
 		t.Run(testCase.Name, func(t *testing.T) {
 			initialObject := unstructured.Unstructured{}
-			err := yaml.Unmarshal(testCase.OriginalObject, &initialObject)
+			err := yaml.Unmarshal(testCase.OriginalObject, &initialObject.Object)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -460,7 +1094,7 @@ metadata:
 			upgraded := initialObject.DeepCopy()
 			err = csaupgrade.UpgradeManagedFields(
 				upgraded,
-				testCase.CSAManager,
+				sets.New(testCase.CSAManagers...),
 				testCase.SSAManager,
 			)

@@ -469,7 +1103,7 @@ metadata:
 			}

 			expectedObject := unstructured.Unstructured{}
-			err = yaml.Unmarshal(testCase.ExpectedObject, &expectedObject)
+			err = yaml.Unmarshal(testCase.ExpectedObject, &expectedObject.Object)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -477,6 +1111,42 @@ metadata:
 			if !reflect.DeepEqual(&expectedObject, upgraded) {
 				t.Fatal(cmp.Diff(&expectedObject, upgraded))
 			}
+
+			// Show that the UpgradeManagedFieldsPatch yields a patch that does
+			// nothing more and nothing less than make the object equal to output
+			// of UpgradeManagedFields
+
+			initialCopy := initialObject.DeepCopyObject()
+			patchBytes, err := csaupgrade.UpgradeManagedFieldsPatch(
+				initialCopy, sets.New(testCase.CSAManagers...), testCase.SSAManager)
+
+			if err != nil {
+				t.Fatal(err)
+			} else if patchBytes != nil {
+				patch, err := jsonpatch.DecodePatch(patchBytes)
+				if err != nil {
+					t.Fatal(err)
+				}
+
+				initialJSON, err := json.Marshal(initialObject.Object)
+				if err != nil {
+					t.Fatal(err)
+				}
+
+				patchedBytes, err := patch.Apply(initialJSON)
+				if err != nil {
+					t.Fatal(err)
+				}
+
+				var patched unstructured.Unstructured
+				if err := json.Unmarshal(patchedBytes, &patched.Object); err != nil {
+					t.Fatal(err)
+				}
+
+				if !reflect.DeepEqual(&patched, upgraded) {
+					t.Fatalf("expected patch to produce an upgraded object: %v", cmp.Diff(patched, upgraded))
+				}
+			}
 		})
 	}
 }
@@ -43,6 +43,7 @@ require (
 	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2
 	sigs.k8s.io/kustomize/kustomize/v4 v4.5.7
 	sigs.k8s.io/kustomize/kyaml v0.13.9
+	sigs.k8s.io/structured-merge-diff/v4 v4.2.3
 	sigs.k8s.io/yaml v1.3.0
 )

@@ -87,7 +88,6 @@ require (
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	sigs.k8s.io/kustomize/api v0.12.1 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
 )

 replace (
@@ -22,6 +22,7 @@ import (
 	"net/http"

 	"github.com/spf13/cobra"
+	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"

 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -36,6 +37,7 @@ import (
 	"k8s.io/cli-runtime/pkg/printers"
 	"k8s.io/cli-runtime/pkg/resource"
 	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/util/csaupgrade"
 	"k8s.io/klog/v2"
 	"k8s.io/kubectl/pkg/cmd/delete"
 	cmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -163,6 +165,9 @@ var (

 	warningNoLastAppliedConfigAnnotation = "Warning: resource %[1]s is missing the %[2]s annotation which is required by %[3]s apply. %[3]s apply should only be used on resources created declaratively by either %[3]s create --save-config or %[3]s apply. The missing annotation will be patched automatically.\n"
 	warningChangesOnDeletingResource     = "Warning: Detected changes to resource %[1]s which is currently being deleted.\n"
+	warningMigrationLastAppliedFailed    = "Warning: failed to migrate kubectl.kubernetes.io/last-applied-configuration for Server-Side Apply. This is non-fatal and will be retried next time you apply. Error: %[1]s\n"
+	warningMigrationPatchFailed          = "Warning: server rejected managed fields migration to Server-Side Apply. This is non-fatal and will be retried next time you apply. Error: %[1]s\n"
+	warningMigrationReapplyFailed        = "Warning: failed to re-apply configuration after performing Server-Side Apply migration. This is non-fatal and will be retried next time you apply. Error: %[1]s\n"
 )

 // NewApplyFlags returns a default ApplyFlags
@@ -542,6 +547,40 @@ See https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts`

 	info.Refresh(obj, true)

+	// Migrate managed fields if necessary.
+	//
+	// By checking afterward instead of fetching the object beforehand and
+	// unconditionally fetching, we make 3 network requests in the rare
+	// case of migration and 1 request if migration is unnecessary.
+	//
+	// Checking beforehand would mean 2 requests for most operations, and 3
+	// requests in the worst case.
+	if err = o.saveLastApplyAnnotationIfNecessary(helper, info); err != nil {
+		fmt.Fprintf(o.ErrOut, warningMigrationLastAppliedFailed, err.Error())
+	} else if performedMigration, err := o.migrateToSSAIfNecessary(helper, info); err != nil {
+		// Print the error as a warning.
+		// This is a non-fatal error because the object was successfully applied
+		// above, but it might have issues since migration failed.
+		//
+		// This migration will be re-attempted if necessary upon next
+		// apply.
+		fmt.Fprintf(o.ErrOut, warningMigrationPatchFailed, err.Error())
+	} else if performedMigration {
+		if obj, err = helper.Patch(
+			info.Namespace,
+			info.Name,
+			types.ApplyPatchType,
+			data,
+			&options,
+		); err != nil {
+			// Re-send original SSA patch (this will allow dropped fields to
+			// finally be removed)
+			fmt.Fprintf(o.ErrOut, warningMigrationReapplyFailed, err.Error())
+		} else {
+			info.Refresh(obj, false)
+		}
+	}
+
 	WarnIfDeleting(info.Object, o.ErrOut)

 	if err := o.MarkObjectVisited(info); err != nil {
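Outside kubectl, the same two-request migration could be driven with a dynamic client. This is a sketch under the assumption that a JSON patch is acceptable for the resource; the manager names are illustrative:

package migration

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/util/csaupgrade"
)

// Migrate fetches an object, computes the CSA-to-SSA managed fields patch,
// and sends it. client is an assumed dynamic.ResourceInterface for the
// target resource.
func Migrate(ctx context.Context, client dynamic.ResourceInterface, name string) error {
	obj, err := client.Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	patch, err := csaupgrade.UpgradeManagedFieldsPatch(
		obj, sets.New("kubectl-client-side-apply"), "kubectl")
	if err != nil || patch == nil {
		// A nil patch means there is nothing to migrate.
		return err
	}

	// The resourceVersion "replace" in the patch makes a concurrent write
	// surface as a 409 conflict, so callers may retry on conflict.
	_, err = client.Patch(ctx, name, types.JSONPatchType, patch, metav1.PatchOptions{})
	return err
}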
@ -660,6 +699,183 @@ See https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts`
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|

// Saves the last-applied-configuration annotation in a separate SSA field manager
// to prevent it from being dropped by users who have transitioned to SSA.
//
// If this operation is not performed, then the last-applied-configuration annotation
// would be removed from the object upon the first SSA usage. We want to keep it
// around for a few releases since it is required to downgrade from SSA back to
// client-side apply per [1] and [2]. This code should be removed once the
// annotation is deprecated.
//
// - [1] https://kubernetes.io/docs/reference/using-api/server-side-apply/#downgrading-from-server-side-apply-to-client-side-apply
// - [2] https://github.com/kubernetes/kubernetes/pull/90187
//
// If the annotation is not already present, or if it is already managed by the
// separate SSA field manager, this is a no-op.
func (o *ApplyOptions) saveLastApplyAnnotationIfNecessary(
	helper *resource.Helper,
	info *resource.Info,
) error {
	if o.FieldManager != fieldManagerServerSideApply {
		// There is no point in preserving the annotation if the field manager
		// will not remain the default. This is because the server will not keep
		// the annotation up to date.
		return nil
	}

	// Send an apply patch with the last-applied annotation
	// so that it is not orphaned by SSA in the following patch:
	accessor, err := meta.Accessor(info.Object)
	if err != nil {
		return err
	}

	// Get the current annotations from the object.
	annots := accessor.GetAnnotations()
	if annots == nil {
		annots = map[string]string{}
	}

	fieldManager := fieldManagerLastAppliedAnnotation
	originalAnnotation, hasAnnotation := annots[corev1.LastAppliedConfigAnnotation]

	// If the annotation does not already exist, there is nothing to do.
	if !hasAnnotation {
		return nil
	}

	// If there is already an SSA field manager which owns the field, then there
	// is nothing to do here.
	if owners := csaupgrade.FindFieldsOwners(
		accessor.GetManagedFields(),
		metav1.ManagedFieldsOperationApply,
		lastAppliedAnnotationFieldPath,
	); len(owners) > 0 {
		return nil
	}

	justAnnotation := &unstructured.Unstructured{}
	justAnnotation.SetGroupVersionKind(info.Mapping.GroupVersionKind)
	justAnnotation.SetName(accessor.GetName())
	justAnnotation.SetNamespace(accessor.GetNamespace())
	justAnnotation.SetAnnotations(map[string]string{
		corev1.LastAppliedConfigAnnotation: originalAnnotation,
	})

	modified, err := runtime.Encode(unstructured.UnstructuredJSONScheme, justAnnotation)
	if err != nil {
		return err
	}

	helperCopy := *helper
	newObj, err := helperCopy.WithFieldManager(fieldManager).Patch(
		info.Namespace,
		info.Name,
		types.ApplyPatchType,
		modified,
		nil,
	)
	if err != nil {
		return err
	}

	return info.Refresh(newObj, false)
}
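
For a concrete picture of what this helper sends, here is a minimal, self-contained sketch of the annotation-preserving apply patch; the ConfigMap identity and the abbreviated annotation value are hypothetical stand-ins:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Build an object that carries nothing but identity fields and the
	// preserved annotation, mirroring justAnnotation above.
	justAnnotation := &unstructured.Unstructured{}
	justAnnotation.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "ConfigMap"})
	justAnnotation.SetName("test")           // hypothetical name
	justAnnotation.SetNamespace("default")   // hypothetical namespace
	justAnnotation.SetAnnotations(map[string]string{
		"kubectl.kubernetes.io/last-applied-configuration": `{"apiVersion":"v1","kind":"ConfigMap","...":"..."}`,
	})

	// This is the body that would be sent as an ApplyPatchType patch under
	// the "kubectl-last-applied" field manager.
	patch, err := runtime.Encode(unstructured.UnstructuredJSONScheme, justAnnotation)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
}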

// Check if the returned object needs to have its kubectl-client-side-apply
// managed fields migrated to server-side-apply.
//
// Field ownership metadata is stored in three places:
// - server-side managed fields
// - client-side managed fields
// - and the last-applied-configuration annotation.
//
// The migration merges the client-side managed fields into the
// server-side managed fields, leaving the last-applied-configuration
// annotation in place. The server will keep the annotation up to date
// after every server-side apply where the following conditions are met:
//
//  1. the field manager is 'kubectl'
//  2. the annotation already exists
func (o *ApplyOptions) migrateToSSAIfNecessary(
	helper *resource.Helper,
	info *resource.Info,
) (migrated bool, err error) {
	accessor, err := meta.Accessor(info.Object)
	if err != nil {
		return false, err
	}

	// To determine which field managers were used by kubectl for client-side-apply,
	// we search for a manager used in `Update` operations which owns the
	// last-applied annotation.
	//
	// This is the last client-side-apply manager which changed the field.
	//
	// There may be multiple owners if multiple managers wrote the same exact
	// configuration. In that case we want to migrate them all.
	csaManagers := csaupgrade.FindFieldsOwners(
		accessor.GetManagedFields(),
		metav1.ManagedFieldsOperationUpdate,
		lastAppliedAnnotationFieldPath)

	managerNames := sets.New[string]()
	for _, entry := range csaManagers {
		managerNames.Insert(entry.Manager)
	}

	// Re-attempt the patch as many times as it conflicts due to the
	// ResourceVersion test failing.
	for i := 0; i < maxPatchRetry; i++ {
		var patchData []byte
		var obj runtime.Object

		patchData, err = csaupgrade.UpgradeManagedFieldsPatch(
			info.Object, managerNames, o.FieldManager)

		if err != nil {
			// If patch generation failed there was likely a bug.
			return false, err
		} else if patchData == nil {
			// nil patch data means there is nothing to do - the object is
			// already migrated.
			return false, nil
		}

		// Send the non-nil patch to upgrade the managed fields.
		obj, err = helper.Patch(
			info.Namespace,
			info.Name,
			types.JSONPatchType,
			patchData,
			nil,
		)

		if err == nil {
			// Stop retrying upon success.
			info.Refresh(obj, false)
			return true, nil
		} else if !errors.IsConflict(err) {
			// Only retry if there was a conflict.
			return false, err
		}

		// Refresh the object for the next iteration.
		err = info.Get()
		if err != nil {
			// If there was an error fetching, return the error.
			return false, err
		}
	}

	// Reaching this point with a non-nil error means there was a conflict and
	// max retries were hit.
	// Return the last error witnessed (which will be a conflict).
	return false, err
}
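
To make the retry loop concrete, here is a sketch of the kind of JSON Patch the migration sends. The operations, paths, and values are illustrative of csaupgrade's output rather than quoted from it; the test on resourceVersion is what turns a concurrent write into the conflict handled above:

// Illustrative only: replace the CSA (Update) managed-fields entries with a
// single Apply entry under the unified manager, guarded by a test on
// resourceVersion so that a concurrent write surfaces as a conflict.
var illustrativeMigrationPatch = []byte(`[
  {"op": "test", "path": "/metadata/resourceVersion", "value": "290"},
  {"op": "replace", "path": "/metadata/managedFields", "value": [
    {"manager": "kubectl", "operation": "Apply", "apiVersion": "v1",
     "fieldsType": "FieldsV1",
     "fieldsV1": {"f:metadata": {"f:labels": {"f:name": {}}}}}
  ]}
]`)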

func (o *ApplyOptions) shouldPrintObject() bool {
	// Print object only if output format other than "name" is specified
	shouldPrint := false
@ -766,6 +982,16 @@ const (
	// for backward compatibility to not conflict with old versions
	// of kubectl server-side apply where `kubectl` has already been the field manager.
	fieldManagerServerSideApply = "kubectl"

	fieldManagerLastAppliedAnnotation = "kubectl-last-applied"
)

var (
	lastAppliedAnnotationFieldPath = fieldpath.NewSet(
		fieldpath.MakePathOrDie(
			"metadata", "annotations",
			corev1.LastAppliedConfigAnnotation),
	)
)
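
As a quick illustration of how this path set is consumed, here is a sketch of the lookup both helpers above perform, using a hypothetical managed-fields entry and assuming the imports already present in this file:

// Hypothetical entry of the shape kubectl client-side apply leaves behind;
// its FieldsV1 contains the last-applied-configuration annotation leaf that
// lastAppliedAnnotationFieldPath points at.
entry := metav1.ManagedFieldsEntry{
	Manager:    "kubectl-client-side-apply",
	Operation:  metav1.ManagedFieldsOperationUpdate,
	APIVersion: "v1",
	FieldsType: "FieldsV1",
	FieldsV1: &metav1.FieldsV1{
		Raw: []byte(`{"f:metadata":{"f:annotations":{"f:kubectl.kubernetes.io/last-applied-configuration":{}}}}`),
	},
}

// FindFieldsOwners returns every manager of the given operation type whose
// field set covers the query path; here it would return the single CSA entry,
// whose Manager name then feeds the migration in migrateToSSAIfNecessary.
owners := csaupgrade.FindFieldsOwners(
	[]metav1.ManagedFieldsEntry{entry},
	metav1.ManagedFieldsOperationUpdate,
	lastAppliedAnnotationFieldPath,
)
_ = owners // owners[0].Manager == "kubectl-client-side-apply"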

// GetApplyFieldManagerFlag gets the field manager for kubectl apply
@ -31,6 +31,7 @@ import (
	openapi_v2 "github.com/google/gnostic/openapiv2"
	"github.com/spf13/cobra"
	"github.com/stretchr/testify/require"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
@ -40,6 +41,7 @@ import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	sptest "k8s.io/apimachinery/pkg/util/strategicpatch/testing"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/cli-runtime/pkg/resource"
@ -47,6 +49,7 @@ import (
	dynamicfakeclient "k8s.io/client-go/dynamic/fake"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/rest/fake"
	"k8s.io/client-go/util/csaupgrade"
	cmdtesting "k8s.io/kubectl/pkg/cmd/testing"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
	"k8s.io/kubectl/pkg/scheme"
@ -185,6 +188,7 @@ const (
	filenameRCLastAppliedArgs = "../../../testdata/apply/rc-lastapplied-args.yaml"
	filenameRCNoAnnotation    = "../../../testdata/apply/rc-no-annotation.yaml"
	filenameRCLASTAPPLIED     = "../../../testdata/apply/rc-lastapplied.yaml"
	filenameRCManagedFieldsLA = "../../../testdata/apply/rc-managedfields-lastapplied.yaml"
	filenameSVC               = "../../../testdata/apply/service.yaml"
	filenameRCSVC             = "../../../testdata/apply/rc-service.yaml"
	filenameNoExistRC         = "../../../testdata/apply/rc-noexist.yaml"
@ -710,6 +714,157 @@ func TestApplyPruneObjects(t *testing.T) {
	}
}

// Tests that applying an object in need of CSA migration results in a call
// to patch it.
func TestApplyCSAMigration(t *testing.T) {
	cmdtesting.InitTestErrorHandler(t)
	nameRC, rcWithManagedFields := readAndAnnotateReplicationController(t, filenameRCManagedFieldsLA)
	pathRC := "/namespaces/test/replicationcontrollers/" + nameRC

	tf := cmdtesting.NewTestFactory().WithNamespace("test")
	defer tf.Cleanup()

	// The object after the patch should be equivalent to the output of
	// csaupgrade.UpgradeManagedFields
	//
	// Parse the object into an unstructured, apply the patch
	postPatchObj := &unstructured.Unstructured{}
	err := json.Unmarshal(rcWithManagedFields, &postPatchObj.Object)
	require.NoError(t, err)

	expectedPatch, err := csaupgrade.UpgradeManagedFieldsPatch(postPatchObj, sets.New(FieldManagerClientSideApply), "kubectl")
	require.NoError(t, err)

	err = csaupgrade.UpgradeManagedFields(postPatchObj, sets.New("kubectl-client-side-apply"), "kubectl")
	require.NoError(t, err)

	postPatchData, err := json.Marshal(postPatchObj)
	require.NoError(t, err)

	patches := 0
	targetPatches := 2
	applies := 0

	tf.UnstructuredClient = &fake.RESTClient{
		NegotiatedSerializer: resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch p, m := req.URL.Path, req.Method; {
			case p == pathRC && m == "GET":
				// During the retry loop for the patch, a fetch is performed.
				// Keep returning the unchanged data.
				if patches < targetPatches {
					bodyRC := ioutil.NopCloser(bytes.NewReader(rcWithManagedFields))
					return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
				}

				t.Fatalf("should not do a fetch in serverside-apply")
				return nil, nil
			case p == pathRC && m == "PATCH":
				if got := req.Header.Get("Content-Type"); got == string(types.ApplyPatchType) {
					defer func() {
						applies += 1
					}()

					switch applies {
					case 0:
						// Initial apply.
						// Just return the same object but with managed fields.
						bodyRC := ioutil.NopCloser(bytes.NewReader(rcWithManagedFields))
						return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
					case 1:
						// The second apply should include only the last-applied
						// annotation, unmodified. Return the same object.
						// NOTE: on a real server this would also modify the
						// managed fields; here we just return the object
						// unmodified. It is not important for this test that
						// the last-applied annotation appear under the new
						// field manager in the response, only that the client
						// asks the server to do it.
						bodyRC := ioutil.NopCloser(bytes.NewReader(rcWithManagedFields))
						return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
					case 2, 3:
						// Before the last apply we have formed our JSONPatch, so
						// reply now with the upgraded object.
						bodyRC := ioutil.NopCloser(bytes.NewReader(postPatchData))
						return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
					default:
						require.Fail(t, "sent more apply requests than expected")
						return &http.Response{StatusCode: http.StatusBadRequest, Header: cmdtesting.DefaultHeader()}, nil
					}
				} else if got == string(types.JSONPatchType) {
					defer func() {
						patches += 1
					}()

					// Require that the patch is equal to what is expected.
					body, err := ioutil.ReadAll(req.Body)
					require.NoError(t, err)
					require.Equal(t, expectedPatch, body)

					switch patches {
					case targetPatches - 1:
						bodyRC := ioutil.NopCloser(bytes.NewReader(postPatchData))
						return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
					default:
						// Return a conflict until the client has retried
						// enough times.
						return &http.Response{StatusCode: http.StatusConflict, Header: cmdtesting.DefaultHeader()}, nil
					}
				} else {
					t.Fatalf("unexpected content-type: %s\n", got)
					return nil, nil
				}
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.OpenAPISchemaFunc = FakeOpenAPISchema.OpenAPISchemaFn
	tf.FakeOpenAPIGetter = FakeOpenAPISchema.OpenAPIGetter
	tf.ClientConfigVal = cmdtesting.DefaultClientConfig()

	ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams()
	cmd := NewCmdApply("kubectl", tf, ioStreams)
	cmd.Flags().Set("filename", filenameRC)
	cmd.Flags().Set("output", "yaml")
	cmd.Flags().Set("server-side", "true")
	cmd.Flags().Set("show-managed-fields", "true")
	cmd.Run(cmd, []string{})

	// The JSONPatch should have been attempted exactly the given number of times.
	require.Equal(t, targetPatches, patches, "should retry as many times as a conflict was returned")
	require.Equal(t, 3, applies, "should perform specified # of apply calls upon migration")
	require.Empty(t, errBuf.String())

	// Ensure that in the future there will be no migrations necessary
	// (by showing migration is a no-op).
	rc := &corev1.ReplicationController{}
	if err := runtime.DecodeInto(codec, buf.Bytes(), rc); err != nil {
		t.Fatal(err)
	}

	upgradedRC := rc.DeepCopyObject()
	err = csaupgrade.UpgradeManagedFields(upgradedRC, sets.New("kubectl-client-side-apply"), "kubectl")
	require.NoError(t, err)
	require.NotEmpty(t, rc.ManagedFields)
	require.Equal(t, rc, upgradedRC, "upgrading should be a no-op in the future")

	// Apply the upgraded object.
	// Expect only a single PATCH call to the apiserver.
	ioStreams, _, _, errBuf = genericclioptions.NewTestIOStreams()
	cmd = NewCmdApply("kubectl", tf, ioStreams)
	cmd.Flags().Set("filename", filenameRC)
	cmd.Flags().Set("output", "yaml")
	cmd.Flags().Set("server-side", "true")
	cmd.Flags().Set("show-managed-fields", "true")
	cmd.Run(cmd, []string{})

	require.Empty(t, errBuf)
	require.Equal(t, 4, applies, "only a single additional call to server-side apply should have been performed")
	require.Equal(t, targetPatches, patches, "no more json patches should have been needed")
}
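
A note on the equivalence this test leans on: applying the patch produced by UpgradeManagedFieldsPatch should leave the object in the same state as calling UpgradeManagedFields on it directly. A condensed sketch, with obj standing in for any object that still carries CSA managed fields:

// Patch form: a JSON Patch to send to the server with types.JSONPatchType.
// A nil patch means the object is already migrated.
patch, err := csaupgrade.UpgradeManagedFieldsPatch(
	obj, sets.New("kubectl-client-side-apply"), "kubectl")

// In-place form: the same conversion performed locally on obj.
err = csaupgrade.UpgradeManagedFields(
	obj, sets.New("kubectl-client-side-apply"), "kubectl")

// After either form succeeds, obj's managed fields match the server's
// post-patch state, and a second invocation is a no-op - exactly what
// the tail of the test asserts.
_, _ = patch, err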

func TestApplyObjectOutput(t *testing.T) {
	cmdtesting.InitTestErrorHandler(t)
	nameRC, currentRC := readAndAnnotateReplicationController(t, filenameRC)
102
staging/src/k8s.io/kubectl/testdata/apply/rc-managedfields-lastapplied.yaml
vendored
Normal file
|
apiVersion: v1
kind: ReplicationController
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"ReplicationController","metadata":{"annotations":{},"labels":{"name":"test-rc"},"name":"test-rc","namespace":"test"},"spec":{"replicas":1,"template":{"metadata":{"labels":{"name":"test-rc"}},"spec":{"containers":[{"image":"nginx","name":"test-rc","ports":[{"containerPort":80}]}]}}}}
  creationTimestamp: "2022-10-06T20:46:22Z"
  generation: 1
  labels:
    name: test-rc
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:status:
        f:fullyLabeledReplicas: {}
        f:observedGeneration: {}
        f:replicas: {}
    manager: kube-controller-manager
    operation: Update
    subresource: status
    time: "2022-10-06T20:46:22Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:kubectl.kubernetes.io/last-applied-configuration: {}
        f:labels:
          .: {}
          f:name: {}
      f:spec:
        f:replicas: {}
        f:selector: {}
        f:template:
          .: {}
          f:metadata:
            .: {}
            f:creationTimestamp: {}
            f:labels:
              .: {}
              f:name: {}
          f:spec:
            .: {}
            f:containers:
              .: {}
              k:{"name":"test-rc"}:
                .: {}
                f:image: {}
                f:imagePullPolicy: {}
                f:name: {}
                f:ports:
                  .: {}
                  k:{"containerPort":80,"protocol":"TCP"}:
                    .: {}
                    f:containerPort: {}
                    f:protocol: {}
                f:resources: {}
                f:terminationMessagePath: {}
                f:terminationMessagePolicy: {}
            f:dnsPolicy: {}
            f:restartPolicy: {}
            f:schedulerName: {}
            f:securityContext: {}
            f:terminationGracePeriodSeconds: {}
    manager: kubectl-client-side-apply
    operation: Update
    time: "2022-10-06T20:46:22Z"
  name: test-rc
  namespace: test
  resourceVersion: "290"
  uid: ad68b34c-d6c5-4d09-b50d-ef49c109778d
spec:
  replicas: 1
  selector:
    name: test-rc
  template:
    metadata:
      creationTimestamp: null
      labels:
        name: test-rc
    spec:
      containers:
      - image: nginx
        imagePullPolicy: Always
        name: test-rc
        ports:
        - containerPort: 80
          protocol: TCP
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  fullyLabeledReplicas: 1
  observedGeneration: 1
  replicas: 1
@ -438,6 +438,165 @@ run_kubectl_server_side_apply_tests() {
  # clean-up
  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]:?}"

  # Test apply migration

  # Create a configmap in the cluster with client-side apply:
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=false -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  legacy: unused
__EOF__
)
  kube::test::if_has_string "${output_message}" 'configmap/test created'

  # Apply the same manifest with the --server-side flag, as per the
  # server-side-apply migration instructions:
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  legacy: unused
__EOF__
)
  kube::test::if_has_string "${output_message}" 'configmap/test serverside-applied'

  # Apply the object a third time using server-side-apply, but this time removing
  # a field and adding a field. Old versions of kubectl would not allow the field
  # to be removed.
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  ssaKey: ssaValue
__EOF__
)
  kube::test::if_has_string "${output_message}" 'configmap/test serverside-applied'

  # Fetch the object and check that it does not have the field 'legacy'
  kube::test::get_object_assert "configmap test" "{{ .data.key }}" 'value'
  kube::test::get_object_assert "configmap test" "{{ .data.legacy }}" '<no value>'
  kube::test::get_object_assert "configmap test" "{{ .data.ssaKey }}" 'ssaValue'

  # CSA the object after it has been server-side-applied and had a field removed.
  # Add a new key with client-side apply; this also removes the field added by
  # server-side apply.
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=false -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  newKey: newValue
__EOF__
)
  kube::test::get_object_assert "configmap test" "{{ .data.key }}" 'value'
  kube::test::get_object_assert "configmap test" "{{ .data.newKey }}" 'newValue'
  kube::test::get_object_assert "configmap test" "{{ .data.ssaKey }}" '<no value>'

  # SSA the object without the field added above by CSA. Show that the object
  # on the server has had the field removed.
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  ssaKey: ssaValue
__EOF__
)

  # Fetch the object and check that it does not have the field 'newKey'
  kube::test::get_object_assert "configmap test" "{{ .data.key }}" 'value'
  kube::test::get_object_assert "configmap test" "{{ .data.newKey }}" '<no value>'
  kube::test::get_object_assert "configmap test" "{{ .data.ssaKey }}" 'ssaValue'

  # Show that kubectl diff --server-side also functions after a migration
  output_message=$(kubectl diff "${kube_flags[@]:?}" --server-side -f - << __EOF__ || test $? -eq 1
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
  annotations:
    newAnnotation: newValue
data:
  key: value
  newKey: newValue
__EOF__
)
  kube::test::if_has_string "${output_message}" '+ newKey: newValue'
  kube::test::if_has_string "${output_message}" '+ newAnnotation: newValue'

  # clean-up
  kubectl "${kube_flags[@]:?}" delete configmap test

  ## Test to show that supplying a custom field manager to kubectl apply
  # does not prevent migration from client-side-apply to server-side-apply
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=false --field-manager=myfm -f - << __EOF__
apiVersion: v1
data:
  key: value1
  legacy: value2
kind: ConfigMap
metadata:
  name: ssa-test
__EOF__
)
  kube::test::if_has_string "$output_message" "configmap/ssa-test created"
  kube::test::get_object_assert "configmap ssa-test" "{{ .data.key }}" 'value1'

  # Show that after client-side applying with a custom field manager, the
  # last-applied annotation is present.
  grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get configmap ssa-test -o yaml "${kube_flags[@]:?}")"

  # Migrate to server-side apply by applying the same object
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=true --field-manager=myfm -f - << __EOF__
apiVersion: v1
data:
  key: value1
  legacy: value2
kind: ConfigMap
metadata:
  name: ssa-test
__EOF__
)
  kube::test::if_has_string "$output_message" "configmap/ssa-test serverside-applied"
  kube::test::get_object_assert "configmap ssa-test" "{{ .data.key }}" 'value1'

  # Show that after migrating to SSA with a custom field manager, the
  # last-applied annotation is dropped.
  ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get configmap ssa-test -o yaml "${kube_flags[@]:?}")" || exit 1

  # Change a field without any conflict and also drop a field in the same patch
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=true --field-manager=myfm -f - << __EOF__
apiVersion: v1
data:
  key: value2
kind: ConfigMap
metadata:
  name: ssa-test
__EOF__
)
  kube::test::if_has_string "$output_message" "configmap/ssa-test serverside-applied"
  kube::test::get_object_assert "configmap ssa-test" "{{ .data.key }}" 'value2'
  kube::test::get_object_assert "configmap ssa-test" "{{ .data.legacy }}" '<no value>'

  # Clean up
  kubectl delete configmap ssa-test

  ## kubectl apply dry-run on CR
  # Create CRD
  kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
1
vendor/modules.txt
vendored
@ -1908,6 +1908,7 @@ k8s.io/client-go/util/cert
k8s.io/client-go/util/certificate
k8s.io/client-go/util/certificate/csr
k8s.io/client-go/util/connrotation
k8s.io/client-go/util/csaupgrade
k8s.io/client-go/util/exec
k8s.io/client-go/util/flowcontrol
k8s.io/client-go/util/homedir
Loading…
Reference in New Issue
Block a user