Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-11-27 19:16:16 +00:00
Respect controllers on PVCs for retention policy
@@ -220,7 +220,7 @@ func (spc *StatefulPodControl) ClaimsMatchRetentionPolicy(ctx context.Context, s
 	case err != nil:
 		return false, fmt.Errorf("Could not retrieve claim %s for %s when checking PVC deletion policy", claimName, pod.Name)
 	default:
-		if !claimOwnerMatchesSetAndPod(logger, claim, set, pod) {
+		if !isClaimOwnerUpToDate(logger, claim, set, pod) {
 			return false, nil
 		}
 	}
@@ -242,14 +242,16 @@ func (spc *StatefulPodControl) UpdatePodClaimForRetentionPolicy(ctx context.Cont
 	case err != nil:
 		return fmt.Errorf("Could not retrieve claim %s not found for %s when checking PVC deletion policy: %w", claimName, pod.Name, err)
 	default:
-		if !claimOwnerMatchesSetAndPod(logger, claim, set, pod) {
+		if hasUnexpectedController(claim, set, pod) {
+			// Add an event so the user knows they're in a strange configuration. The claim will be cleaned up below.
+			msg := fmt.Sprintf("PersistentVolumeClaim %s has a conflicting OwnerReference that acts as a manging controller, the retention policy is ignored for this claim", claimName)
+			spc.recorder.Event(set, v1.EventTypeWarning, "ConflictingController", msg)
+		}
+		if !isClaimOwnerUpToDate(logger, claim, set, pod) {
 			claim = claim.DeepCopy() // Make a copy so we don't mutate the shared cache.
-			needsUpdate := updateClaimOwnerRefForSetAndPod(logger, claim, set, pod)
-			if needsUpdate {
-				err := spc.objectMgr.UpdateClaim(claim)
-				if err != nil {
-					return fmt.Errorf("Could not update claim %s for delete policy ownerRefs: %w", claimName, err)
-				}
-			}
+			updateClaimOwnerRefForSetAndPod(logger, claim, set, pod)
+			if err := spc.objectMgr.UpdateClaim(claim); err != nil {
+				return fmt.Errorf("could not update claim %s for delete policy ownerRefs: %w", claimName, err)
+			}
 		}
 	}
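The kept context line `claim = claim.DeepCopy()` is the informer-cache rule the new flow still relies on: objects returned from a lister are shared with the cache, so they must be copied before mutation and only the copy written back. A minimal sketch of that pattern outside the controller (the helper name and arguments are hypothetical, not part of the commit):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	corelisters "k8s.io/client-go/listers/core/v1"
)

// clearClaimOwnerRefs is a hypothetical helper showing the copy-then-update
// pattern: lister objects are shared with the informer cache and must never
// be mutated in place.
func clearClaimOwnerRefs(ctx context.Context, client kubernetes.Interface,
	lister corelisters.PersistentVolumeClaimLister, namespace, name string) error {
	cached, err := lister.PersistentVolumeClaims(namespace).Get(name)
	if err != nil {
		return err
	}
	claim := cached.DeepCopy() // copy first; the cached object is shared
	claim.SetOwnerReferences(nil)
	_, err = client.CoreV1().PersistentVolumeClaims(namespace).Update(ctx, claim, metav1.UpdateOptions{})
	return err
}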
@@ -275,8 +277,7 @@ func (spc *StatefulPodControl) PodClaimIsStale(set *apps.StatefulSet, pod *v1.Po
 	case err != nil:
 		return false, err
 	case err == nil:
 		// A claim is stale if it doesn't match the pod's UID, including if the pod has no UID.
-		if hasStaleOwnerRef(pvc, pod) {
+		if hasStaleOwnerRef(pvc, pod, podKind) {
 			return true, nil
 		}
 	}

@@ -41,6 +41,7 @@ import (
 	_ "k8s.io/kubernetes/pkg/apis/apps/install"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
 	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/utils/ptr"
 )
 
 func TestStatefulPodControlCreatesPods(t *testing.T) {

@@ -502,7 +503,7 @@ func TestStatefulPodControlDeleteFailure(t *testing.T) {
 }
 
 func TestStatefulPodControlClaimsMatchDeletionPolcy(t *testing.T) {
-	// The claimOwnerMatchesSetAndPod is tested exhaustively in stateful_set_utils_test; this
+	// The isClaimOwnerUpToDate is tested exhaustively in stateful_set_utils_test; this
 	// test is for the wiring to the method tested there.
 	_, ctx := ktesting.NewTestContext(t)
 	fakeClient := &fake.Clientset{}

@@ -542,38 +543,64 @@ func TestStatefulPodControlUpdatePodClaimForRetentionPolicy(t *testing.T) {
 	testFn := func(t *testing.T) {
 		_, ctx := ktesting.NewTestContext(t)
 		featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)
-		fakeClient := &fake.Clientset{}
-		indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
-		claimLister := corelisters.NewPersistentVolumeClaimLister(indexer)
-		fakeClient.AddReactor("update", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
-			update := action.(core.UpdateAction)
-			indexer.Update(update.GetObject())
-			return true, update.GetObject(), nil
-		})
-		set := newStatefulSet(3)
-		set.GetObjectMeta().SetUID("set-123")
-		pod := newStatefulSetPod(set, 0)
-		claims := getPersistentVolumeClaims(set, pod)
-		for k := range claims {
-			claim := claims[k]
-			indexer.Add(&claim)
-		}
-		control := NewStatefulPodControl(fakeClient, nil, claimLister, &noopRecorder{})
-		set.Spec.PersistentVolumeClaimRetentionPolicy = &apps.StatefulSetPersistentVolumeClaimRetentionPolicy{
-			WhenDeleted: apps.DeletePersistentVolumeClaimRetentionPolicyType,
-			WhenScaled:  apps.RetainPersistentVolumeClaimRetentionPolicyType,
-		}
-		if err := control.UpdatePodClaimForRetentionPolicy(ctx, set, pod); err != nil {
-			t.Errorf("Unexpected error for UpdatePodClaimForRetentionPolicy (retain): %v", err)
-		}
-		expectRef := utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC)
-		for k := range claims {
-			claim, err := claimLister.PersistentVolumeClaims(claims[k].Namespace).Get(claims[k].Name)
-			if err != nil {
-				t.Errorf("Unexpected error getting Claim %s/%s: %v", claim.Namespace, claim.Name, err)
-			}
-			if hasOwnerRef(claim, set) != expectRef {
-				t.Errorf("Claim %s/%s bad set owner ref", claim.Namespace, claim.Name)
+		testCases := []struct {
+			name      string
+			ownerRef  []metav1.OwnerReference
+			expectRef bool
+		}{
+			{
+				name:      "bare PVC",
+				expectRef: true,
+			},
+			{
+				name:      "PVC already controller",
+				ownerRef:  []metav1.OwnerReference{{Controller: ptr.To(true), Name: "foobar"}},
+				expectRef: false,
+			},
+		}
+		for _, tc := range testCases {
+			fakeClient := &fake.Clientset{}
+			indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+			claimLister := corelisters.NewPersistentVolumeClaimLister(indexer)
+			fakeClient.AddReactor("update", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
+				update := action.(core.UpdateAction)
+				if err := indexer.Update(update.GetObject()); err != nil {
+					t.Fatalf("could not update index: %v", err)
+				}
+				return true, update.GetObject(), nil
+			})
+			set := newStatefulSet(3)
+			set.GetObjectMeta().SetUID("set-123")
+			pod0 := newStatefulSetPod(set, 0)
+			claims0 := getPersistentVolumeClaims(set, pod0)
+			for k := range claims0 {
+				claim := claims0[k]
+				if tc.ownerRef != nil {
+					claim.SetOwnerReferences(tc.ownerRef)
+				}
+				if err := indexer.Add(&claim); err != nil {
+					t.Errorf("Could not add claim %s: %v", k, err)
+				}
+			}
+			control := NewStatefulPodControl(fakeClient, nil, claimLister, &noopRecorder{})
+			set.Spec.PersistentVolumeClaimRetentionPolicy = &apps.StatefulSetPersistentVolumeClaimRetentionPolicy{
+				WhenDeleted: apps.DeletePersistentVolumeClaimRetentionPolicyType,
+				WhenScaled:  apps.RetainPersistentVolumeClaimRetentionPolicyType,
+			}
+			if err := control.UpdatePodClaimForRetentionPolicy(ctx, set, pod0); err != nil {
+				t.Errorf("Unexpected error for UpdatePodClaimForRetentionPolicy (retain), pod0: %v", err)
+			}
+			expectRef := tc.expectRef && utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC)
+			for k := range claims0 {
+				claim, err := claimLister.PersistentVolumeClaims(claims0[k].Namespace).Get(claims0[k].Name)
+				if err != nil {
+					t.Errorf("Unexpected error getting Claim %s/%s: %v", claim.Namespace, claim.Name, err)
+				}
+				if hasOwnerRef(claim, set) != expectRef {
+					t.Errorf("%s: Claim %s/%s bad set owner ref", tc.name, claim.Namespace, claim.Name)
+				}
 			}
 		}
 	}
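The new test table marks a PVC as already controlled via `Controller: ptr.To(true)`: `OwnerReference.Controller` is a `*bool`, so marking a reference as a managing controller needs a pointer, which `k8s.io/utils/ptr` supplies without a temporary variable. A small sketch of such a conflicting reference (the GVK and names are made up for illustration):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// conflictingOwner builds an OwnerReference like the "PVC already controller"
// test case above: some other object already claims to be the controller.
// The name and GVK here are illustrative, not from the commit.
func conflictingOwner() metav1.OwnerReference {
	return metav1.OwnerReference{
		APIVersion: "example.com/v1",
		Kind:       "Foo",
		Name:       "foobar",
		UID:        "uid-foobar",
		Controller: ptr.To(true), // Controller is *bool; ptr.To avoids a temp variable
	}
}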
@@ -663,12 +690,22 @@ func TestPodClaimIsStale(t *testing.T) {
 			claimIndexer.Add(&claim)
 		case stale:
 			claim.SetOwnerReferences([]metav1.OwnerReference{
-				{Name: "set-3", UID: types.UID("stale")},
+				{
+					Name:       "set-3",
+					UID:        types.UID("stale"),
+					APIVersion: "v1",
+					Kind:       "Pod",
+				},
 			})
 			claimIndexer.Add(&claim)
 		case withRef:
 			claim.SetOwnerReferences([]metav1.OwnerReference{
-				{Name: "set-3", UID: types.UID("123")},
+				{
+					Name:       "set-3",
+					UID:        types.UID("123"),
+					APIVersion: "v1",
+					Kind:       "Pod",
+				},
 			})
 			claimIndexer.Add(&claim)
 		}

@@ -710,7 +747,8 @@ func TestStatefulPodControlRetainDeletionPolicyUpdate(t *testing.T) {
 	}
 	for k := range claims {
 		claim := claims[k]
-		setOwnerRef(&claim, set, &set.TypeMeta) // This ownerRef should be removed in the update.
+		// This ownerRef should be removed in the update.
+		claim.SetOwnerReferences(addControllerRef(claim.GetOwnerReferences(), set, controllerKind))
 		claimIndexer.Add(&claim)
 	}
 	control := NewStatefulPodControl(fakeClient, podLister, claimLister, recorder)

@@ -49,6 +49,9 @@ import (
 // controllerKind contains the schema.GroupVersionKind for this controller type.
 var controllerKind = apps.SchemeGroupVersion.WithKind("StatefulSet")
 
+// podKind contains the schema.GroupVersionKind for pods.
+var podKind = v1.SchemeGroupVersion.WithKind("Pod")
+
 // StatefulSetController controls statefulsets.
 type StatefulSetController struct {
 	// client interface

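`podKind` complements the existing `controllerKind` so ownerRefs can be compared by GroupVersionKind rather than by hand-built strings. A standalone sketch of what the two GVKs render as in an ownerRef's APIVersion/Kind fields (values follow from the scheme group versions):

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
)

func main() {
	controllerKind := apps.SchemeGroupVersion.WithKind("StatefulSet")
	podKind := v1.SchemeGroupVersion.WithKind("Pod")
	// OwnerReference.APIVersion stores group and version as one string;
	// the core group is empty, so a Pod's APIVersion is just "v1".
	fmt.Println(controllerKind.GroupVersion().String(), controllerKind.Kind) // apps/v1 StatefulSet
	fmt.Println(podKind.GroupVersion().String(), podKind.Kind)               // v1 Pod
}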
@@ -26,6 +26,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"

@@ -170,9 +171,100 @@ func getPersistentVolumeClaimRetentionPolicy(set *apps.StatefulSet) apps.Statefu
 	return policy
 }
 
-// claimOwnerMatchesSetAndPod returns false if the ownerRefs of the claim are not set consistently with the
+// matchesRef returns true when the object matches the owner reference, that is the name and GVK are the same.
+func matchesRef(ref *metav1.OwnerReference, obj metav1.Object, gvk schema.GroupVersionKind) bool {
+	return gvk.GroupVersion().String() == ref.APIVersion && gvk.Kind == ref.Kind && ref.Name == obj.GetName()
+}
+
+// hasUnexpectedController returns true if the set has a retention policy and there is a controller
+// for the claim that's not the set or pod. Since the retention policy may have been changed, it is
+// always valid for the set or pod to be a controller.
+func hasUnexpectedController(claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
+	policy := getPersistentVolumeClaimRetentionPolicy(set)
+	const retain = apps.RetainPersistentVolumeClaimRetentionPolicyType
+	if policy.WhenScaled == retain && policy.WhenDeleted == retain {
+		// On a retain policy, it's not a problem for different controller to be managing the claims.
+		return false
+	}
+	for _, ownerRef := range claim.GetOwnerReferences() {
+		if matchesRef(&ownerRef, set, controllerKind) {
+			if ownerRef.UID != set.GetUID() {
+				// A UID mismatch means that pods were incorrectly orphaned. Treating this as an unexpected
+				// controller means we won't touch the PVCs (eg, leave it to the garbage collector to clean
+				// up if appropriate).
+				return true
+			}
+			continue // This is us.
+		}
+
+		if matchesRef(&ownerRef, pod, podKind) {
+			if ownerRef.UID != pod.GetUID() {
+				// This is the same situation as the set UID mismatch, above.
+				return true
+			}
+			continue // This is us.
+		}
+		if ownerRef.Controller != nil && *ownerRef.Controller {
+			return true // This is another controller.
+		}
+	}
+	return false
+}
+
+// hasNonControllerOwner returns true if the pod or set is an owner but not controller of the claim.
+func hasNonControllerOwner(claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
+	for _, ownerRef := range claim.GetOwnerReferences() {
+		if ownerRef.UID == set.GetUID() || ownerRef.UID == pod.GetUID() {
+			if ownerRef.Controller == nil || !*ownerRef.Controller {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// removeRefs removes any owner refs from the list matching predicate. Returns true if the list was changed and
+// the new (or unchanged list).
+func removeRefs(refs []metav1.OwnerReference, predicate func(ref *metav1.OwnerReference) bool) []metav1.OwnerReference {
+	newRefs := []metav1.OwnerReference{}
+	for _, ownerRef := range refs {
+		if !predicate(&ownerRef) {
+			newRefs = append(newRefs, ownerRef)
+		}
+	}
+	return newRefs
+}
+
+// isClaimOwnerUpToDate returns false if the ownerRefs of the claim are not set consistently with the
 // PVC deletion policy for the StatefulSet.
-func claimOwnerMatchesSetAndPod(logger klog.Logger, claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
+//
+// If there are stale references or unexpected controllers, this returns true in order to not touch
+// PVCs that have gotten into this unknown state. Otherwise the ownerships are checked to match the
+// PVC retention policy:
+//
+//	Retain on scaling and set deletion: no owner ref
+//	Retain on scaling and delete on set deletion: owner ref on the set only
+//	Delete on scaling and retain on set deletion: owner ref on the pod only
+//	Delete on scaling and set deletion: owner refs on both set and pod.
+func isClaimOwnerUpToDate(logger klog.Logger, claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
+	if hasStaleOwnerRef(claim, set, controllerKind) || hasStaleOwnerRef(claim, pod, podKind) {
+		// The claim is being managed by previous, presumably deleted, version of the controller. It should not be touched.
+		return true
+	}
+
+	if hasUnexpectedController(claim, set, pod) {
+		if hasOwnerRef(claim, set) || hasOwnerRef(claim, pod) {
+			return false // Need to clean up the conflicting controllers
+		}
+		// The claim refs are good, we don't want to add any controllers on top of the unexpected one.
+		return true
+	}
+
+	if hasNonControllerOwner(claim, set, pod) {
+		// Some resource has an owner ref, but there is no controller. This needs to be updated.
+		return false
+	}
+
 	policy := getPersistentVolumeClaimRetentionPolicy(set)
 	const retain = apps.RetainPersistentVolumeClaimRetentionPolicyType
 	const delete = apps.DeletePersistentVolumeClaimRetentionPolicyType
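The split matters: `matchesRef` compares only name and GVK, and the callers layer the UID check on top, which is how a claim owned by an older, deleted incarnation of the same set is classified as unexpectedly controlled rather than silently adopted. A standalone sketch of that two-step check (`matchesRef` is copied from the hunk above; the objects are illustrative):

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// matchesRef as introduced above: name and GVK only, no UID.
func matchesRef(ref *metav1.OwnerReference, obj metav1.Object, gvk schema.GroupVersionKind) bool {
	return gvk.GroupVersion().String() == ref.APIVersion && gvk.Kind == ref.Kind && ref.Name == obj.GetName()
}

func main() {
	controllerKind := apps.SchemeGroupVersion.WithKind("StatefulSet")
	set := &apps.StatefulSet{ObjectMeta: metav1.ObjectMeta{Name: "web", UID: "uid-1"}}

	// Same name and GVK, different UID: matchesRef is true, so the caller
	// then sees the UID mismatch and flags the claim as orphaned.
	ref := metav1.OwnerReference{APIVersion: "apps/v1", Kind: "StatefulSet", Name: "web", UID: "uid-0"}
	fmt.Println(matchesRef(&ref, set, controllerKind)) // true
	fmt.Println(ref.UID == set.GetUID())               // false -> treated as unexpected controller
}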
@@ -214,64 +306,53 @@ func claimOwnerMatchesSetAndPod(logger klog.Logger, claim *v1.PersistentVolumeCl
 
 // updateClaimOwnerRefForSetAndPod updates the ownerRefs for the claim according to the deletion policy of
 // the StatefulSet. Returns true if the claim was changed and should be updated and false otherwise.
-func updateClaimOwnerRefForSetAndPod(logger klog.Logger, claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
-	needsUpdate := false
-	// Sometimes the version and kind are not set {pod,set}.TypeMeta. These are necessary for the ownerRef.
-	// This is the case both in real clusters and the unittests.
-	// TODO: there must be a better way to do this other than hardcoding the pod version?
-	updateMeta := func(tm *metav1.TypeMeta, kind string) {
-		if tm.APIVersion == "" {
-			if kind == "StatefulSet" {
-				tm.APIVersion = "apps/v1"
-			} else {
-				tm.APIVersion = "v1"
-			}
-		}
-		if tm.Kind == "" {
-			tm.Kind = kind
-		}
-	}
-	podMeta := pod.TypeMeta
-	updateMeta(&podMeta, "Pod")
-	setMeta := set.TypeMeta
-	updateMeta(&setMeta, "StatefulSet")
+// isClaimOwnerUpToDate should be called before this to avoid an expensive update operation.
+func updateClaimOwnerRefForSetAndPod(logger klog.Logger, claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) {
+	refs := claim.GetOwnerReferences()
+
+	unexpectedController := hasUnexpectedController(claim, set, pod)
+
+	// Scrub any ownerRefs to our set & pod.
+	refs = removeRefs(refs, func(ref *metav1.OwnerReference) bool {
+		return matchesRef(ref, set, controllerKind) || matchesRef(ref, pod, podKind)
+	})
+
+	if unexpectedController {
+		// Leave ownerRefs to our set & pod scrubed and return without creating new ones.
+		claim.SetOwnerReferences(refs)
+		return
+	}
+
 	policy := getPersistentVolumeClaimRetentionPolicy(set)
 	const retain = apps.RetainPersistentVolumeClaimRetentionPolicyType
 	const delete = apps.DeletePersistentVolumeClaimRetentionPolicyType
 	switch {
 	default:
 		logger.Error(nil, "Unknown policy, treating as Retain", "policy", set.Spec.PersistentVolumeClaimRetentionPolicy)
		fallthrough
 	case policy.WhenScaled == retain && policy.WhenDeleted == retain:
-		needsUpdate = removeOwnerRef(claim, set) || needsUpdate
-		needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
+		// Nothing to do
 	case policy.WhenScaled == retain && policy.WhenDeleted == delete:
-		needsUpdate = setOwnerRef(claim, set, &setMeta) || needsUpdate
-		needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
+		refs = addControllerRef(refs, set, controllerKind)
 	case policy.WhenScaled == delete && policy.WhenDeleted == retain:
-		needsUpdate = removeOwnerRef(claim, set) || needsUpdate
 		podScaledDown := !podInOrdinalRange(pod, set)
 		if podScaledDown {
-			needsUpdate = setOwnerRef(claim, pod, &podMeta) || needsUpdate
-		}
-		if !podScaledDown {
-			needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
+			refs = addControllerRef(refs, pod, podKind)
 		}
 	case policy.WhenScaled == delete && policy.WhenDeleted == delete:
 		podScaledDown := !podInOrdinalRange(pod, set)
 		if podScaledDown {
-			needsUpdate = removeOwnerRef(claim, set) || needsUpdate
-			needsUpdate = setOwnerRef(claim, pod, &podMeta) || needsUpdate
+			refs = addControllerRef(refs, pod, podKind)
 		}
 		if !podScaledDown {
-			needsUpdate = setOwnerRef(claim, set, &setMeta) || needsUpdate
-			needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
+			refs = addControllerRef(refs, set, controllerKind)
 		}
 	}
-	return needsUpdate
+	claim.SetOwnerReferences(refs)
 }
 
-// hasOwnerRef returns true if target has an ownerRef to owner.
+// hasOwnerRef returns true if target has an ownerRef to owner (as its UID).
+// This does not check if the owner is a controller.
 func hasOwnerRef(target, owner metav1.Object) bool {
 	ownerUID := owner.GetUID()
 	for _, ownerRef := range target.GetOwnerReferences() {
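The rewrite trades the old per-policy setOwnerRef/removeOwnerRef bookkeeping for a scrub-then-add pass: first drop every ref naming the set or pod, then append only what the current policy calls for, leaving foreign refs untouched. A reduced sketch of that shape (the helper and sample refs are invented for illustration):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// scrubThenAdd removes every ref selected by drop, then appends want.
// This mirrors the removeRefs + addControllerRef flow above: starting from
// a clean slate makes the final list independent of the claim's history.
func scrubThenAdd(refs []metav1.OwnerReference, drop func(*metav1.OwnerReference) bool, want ...metav1.OwnerReference) []metav1.OwnerReference {
	out := []metav1.OwnerReference{}
	for i := range refs {
		if !drop(&refs[i]) {
			out = append(out, refs[i])
		}
	}
	return append(out, want...)
}

func main() {
	refs := []metav1.OwnerReference{
		{Kind: "StatefulSet", Name: "web", UID: "old"},
		{Kind: "Backup", Name: "nightly", UID: "keep"}, // foreign ref is preserved
	}
	refs = scrubThenAdd(refs,
		func(r *metav1.OwnerReference) bool { return r.Kind == "StatefulSet" && r.Name == "web" },
		metav1.OwnerReference{Kind: "StatefulSet", Name: "web", UID: "new"},
	)
	fmt.Println(len(refs), refs[1].UID) // 2 new
}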
@@ -282,53 +363,28 @@ func hasOwnerRef(target, owner metav1.Object) bool {
 	return false
 }
 
-// hasStaleOwnerRef returns true if target has a ref to owner that appears to be stale.
-func hasStaleOwnerRef(target, owner metav1.Object) bool {
+// hasStaleOwnerRef returns true if target has a ref to owner that appears to be stale, that is,
+// the ref matches the object but not the UID.
+func hasStaleOwnerRef(target *v1.PersistentVolumeClaim, obj metav1.Object, gvk schema.GroupVersionKind) bool {
 	for _, ownerRef := range target.GetOwnerReferences() {
-		if ownerRef.Name == owner.GetName() && ownerRef.UID != owner.GetUID() {
-			return true
+		if matchesRef(&ownerRef, obj, gvk) {
+			return ownerRef.UID != obj.GetUID()
 		}
 	}
 	return false
 }
 
-// setOwnerRef adds owner to the ownerRefs of target, if necessary. Returns true if target needs to be
-// updated and false otherwise.
-func setOwnerRef(target, owner metav1.Object, ownerType *metav1.TypeMeta) bool {
-	if hasOwnerRef(target, owner) {
-		return false
-	}
-	ownerRefs := append(
-		target.GetOwnerReferences(),
-		metav1.OwnerReference{
-			APIVersion: ownerType.APIVersion,
-			Kind:       ownerType.Kind,
-			Name:       owner.GetName(),
-			UID:        owner.GetUID(),
-		})
-	target.SetOwnerReferences(ownerRefs)
-	return true
-}
-
-// removeOwnerRef removes owner from the ownerRefs of target, if necessary. Returns true if target needs
-// to be updated and false otherwise.
-func removeOwnerRef(target, owner metav1.Object) bool {
-	if !hasOwnerRef(target, owner) {
-		return false
-	}
-	ownerUID := owner.GetUID()
-	oldRefs := target.GetOwnerReferences()
-	newRefs := make([]metav1.OwnerReference, len(oldRefs)-1)
-	skip := 0
-	for i := range oldRefs {
-		if oldRefs[i].UID == ownerUID {
-			skip = -1
-		} else {
-			newRefs[i+skip] = oldRefs[i]
+// addControllerRef returns refs with owner added as a controller, if necessary.
+func addControllerRef(refs []metav1.OwnerReference, owner metav1.Object, gvk schema.GroupVersionKind) []metav1.OwnerReference {
+	for _, ref := range refs {
+		if ref.UID == owner.GetUID() {
+			// Already added. Since we scrub our refs before making any changes, we know it's already
+			// a controller if appropriate.
+			return refs
 		}
 	}
-	target.SetOwnerReferences(newRefs)
-	return true
+
+	return append(refs, *metav1.NewControllerRef(owner, gvk))
 }
 
 // getPersistentVolumeClaims gets a map of PersistentVolumeClaims to their template names, as defined in set. The
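`addControllerRef` defers to `metav1.NewControllerRef`, which derives APIVersion and Kind from the GVK and marks the reference as a controller, removing the old need to patch up empty TypeMeta fields by hand. A quick sketch of the reference it produces (object names are illustrative):

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	set := &apps.StatefulSet{ObjectMeta: metav1.ObjectMeta{Name: "web", UID: "uid-1"}}
	controllerKind := apps.SchemeGroupVersion.WithKind("StatefulSet")

	// NewControllerRef fills APIVersion/Kind from the GVK and sets both
	// Controller and BlockOwnerDeletion to true.
	ref := metav1.NewControllerRef(set, controllerKind)
	fmt.Println(ref.APIVersion, ref.Kind, ref.Name, *ref.Controller) // apps/v1 StatefulSet web true
}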
File diff suppressed because it is too large